#!/usr/bin/env python3
# Copyright (c) 2020-2022, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
4import argparse
5import glob
6import json
7import math
8import os
9import queue
10import re
11import sys
12import threading
13import traceback
14from datetime import datetime
15from enum import IntEnum
16from enum import unique
17
18import numpy as np
19from checker.tosa_result_checker import LogColors
20from checker.tosa_result_checker import print_color
21from checker.tosa_result_checker import set_print_in_color
22from runner.run_command import run_sh_command
23from xunit.xunit import xunit_results
24from xunit.xunit import xunit_test
25
26
def parse_args(argv=None):
    """Parse command-line arguments for the regression runner.

    Args:
        argv: Optional list of argument strings. Defaults to sys.argv[1:]
            when None; accepting an explicit list keeps the parser testable.

    Returns:
        argparse.Namespace with all options resolved, including the
        framework-list default and the auto-detected parallel job count.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-t", "--test", dest="test", type=str, nargs="+", help="Test(s) to run"
    )
    parser.add_argument(
        "-r",
        "--recursive",
        dest="recursive_tests",
        action="store_true",
        help="Recursively search for tests",
    )
    parser.add_argument(
        "--tf-base-dir",
        dest="tf_base_dir",
        type=str,
        required=True,
        help="Tensorflow/MLIR base directory",
    )
    parser.add_argument(
        "--tools-base-dir",
        dest="tools_base_dir",
        type=str,
        required=True,
        help="Reference model base directory",
    )
    parser.add_argument(
        "-v", "--verbose", dest="verbose", action="count", help="Verbose run"
    )
    parser.add_argument(
        "-dref",
        "--debug-ref-model",
        dest="debug_ref_model",
        action="store_true",
        help="Enable TOSA Reference model debugging",
    )
    parser.add_argument(
        "--tolerance",
        dest="tolerance",
        default=1e-3,
        type=float,
        help="Comparison tolerance b value",
    )
    parser.add_argument(
        "--no-compiler",
        dest="no_compiler",
        action="store_true",
        help="Do not run TF MLIR/tfopt/TOSA compiler. Just run TOSA Reference model",
    )
    parser.add_argument(
        "--no-ref-model",
        dest="no_ref",
        action="store_true",
        help="Do not run TOSA reference model, just run TF MLIR/tfopt/TOSA compiler.",
    )
    parser.add_argument(
        "--valgrind",
        dest="valgrind",
        action="store_true",
        help="Enable valgrind on TOSA Reference Model",
    )
    parser.add_argument(
        "-j", "--jobs", dest="jobs", type=int, default=1, help="Number of parallel jobs"
    )
    parser.add_argument(
        "--no-color",
        "--no-colour",
        dest="no_color",
        action="store_true",
        help="Disable color output",
    )
    parser.add_argument(
        "-f",
        "--framework",
        dest="framework",
        default=[],
        action="append",
        help="Frameworks to test (tf, tflite)",
    )
    parser.add_argument(
        "--override-exclusions",
        dest="override_exclusions",
        default=False,
        action="store_true",
        help="Ignore the framework exclusions listed in the test JSON",
    )
    parser.add_argument(
        "--xunit-file",
        dest="xunit_file",
        type=str,
        default="result.xml",
        help="XUnit result output file",
    )
    parser.add_argument(
        "--xunit-classname-prefix",
        dest="xunit_classname_prefix",
        default="TFUnitTests",
        help="Prefix for xunit classname",
    )
    parser.add_argument(
        "--hex-bool-hack",
        dest="hex_bool_hack",
        default=1,
        type=int,
        help=(
            "Hack around bug in MLIR hex parsing for boolean types"
            " by disabling hex encoding"
        ),
    )
    parser.add_argument(
        "--regression-mode",
        dest="regression_mode",
        default=False,
        action="store_true",
        help="Options to make the script more friendly for jenkins regressions",
    )
    parser.add_argument(
        "--quantize-tolerance",
        dest="quantize_tolerance",
        default=0,
        type=int,
        help=(
            "Tolerance when comparing TOSA reference model result"
            " to TensorFlow Lite reference"
        ),
    )
    parser.add_argument(
        "--test-dir",
        dest="test_dir",
        default="",
        help="Path to prepend to paths in test.json",
    )

    parser.add_argument(
        "-o", "--output", dest="output_file", help="Redirect script output to a file"
    )

    args = parser.parse_args(argv)

    # No easy way to both do array append and override a default value
    if not args.framework:
        args.framework = ["tf", "tflite"]

    # Autodetect CPU count. os.cpu_count() may return None on platforms
    # where it cannot be determined, so fall back to a single job.
    if args.jobs <= 0:
        args.jobs = os.cpu_count() or 1

    return args
175
176
@unique
class TestResult(IntEnum):
    """Outcome of a single test run.

    Values are contiguous integers starting at 0 because they index both the
    ``results`` counter list in main() and the ``TestResultErrorStr`` message
    table below; keep the three in the same order.
    """

    PASS = 0
    COMPILER_ERROR = 1
    REF_MODEL_ERROR = 2
    REF_MODEL_UNPREDICTABLE = 3
    REF_MODEL_RUNTIME_ERROR = 4
    MISMATCH = 5
    NOT_LOWERED = 6
    INVALID_MLIR = 7
    INTERNAL_ERROR = 8
    SKIPPED = 9
189
190
# Human-readable message for each TestResult value, indexed by the enum's
# integer value. PASS (first) and SKIPPED (last) intentionally map to empty
# strings so no error text is printed for them. Must stay in sync with the
# TestResult enum above.
TestResultErrorStr = [
    "",
    "Compiler error",
    "Reference model error",
    "Reference model unpredictable",
    "Reference model runtime error",
    "Mismatch",
    "Not lowered",
    "Invalid MLIR",
    "Internal error",
    "",
]
203
204
def parse_compiler_output(compiler_stdout, compiler_stderr):
    """Classify a compiler run from its captured output.

    Returns TestResult.NOT_LOWERED when the compiler reported an operator it
    could not lower to TOSA, otherwise TestResult.PASS. Only stdout is
    inspected; stderr is accepted for signature symmetry with the reference
    model parser.
    """
    marker = " has not been lowered yet, skipped"
    for text_line in compiler_stdout.splitlines():
        if marker in text_line:
            return TestResult.NOT_LOWERED
    return TestResult.PASS
214
215
def parse_reference_model_output(ref_model_stdout, ref_model_stderr):
    """Classify a reference-model run by scanning stderr for status strings.

    The first line that matches a known pattern decides the result; patterns
    are tried in priority order (UNPREDICTABLE, then graph ERROR, then an
    unknown status code). Returns TestResult.PASS when nothing matches.
    """
    checks = (
        (re.compile(r".*UNPREDICTABLE.*"), TestResult.REF_MODEL_UNPREDICTABLE),
        (re.compile(r".* Graph result: ERROR.*"), TestResult.REF_MODEL_ERROR),
        (re.compile(r".* Unknown graph status code.*"), TestResult.REF_MODEL_RUNTIME_ERROR),
    )

    for text_line in ref_model_stderr.splitlines():
        for pattern, outcome in checks:
            if pattern.match(text_line):
                return outcome

    return TestResult.PASS
231
232
# write a self-contained test descriptor in json format
def write_reference_runner_json(
    filename,
    tosa_filename,
    ifm_name,
    ifm_file,
    ofm_name,
    ofm_file,
    expected_failure=False,
):
    """Write a JSON test descriptor so that it is fairly easy to pick up the
    test and generate commands for a third party tool."""
    descriptor = {
        "tosa_file": tosa_filename,
        "ifm_name": ifm_name,
        "ifm_file": ifm_file,
        "ofm_name": ofm_name,
        "ofm_file": ofm_file,
        "expected_failure": expected_failure,
    }

    with open(filename, "w") as json_out:
        json.dump(descriptor, json_out, indent=" ")
256
257
def run_test(args, test, framework):
    """Run a single framework test end to end.

    Translates the framework model to MLIR, lowers it to TOSA, runs the TOSA
    reference model and compares its output against the framework reference
    output stored with the test.

    Args:
        args: parsed command-line namespace from parse_args().
        test: path to the test directory (must contain test.json).
        framework: "tf" or "tflite".

    Returns:
        Tuple of (TestResult, tolerance, message).

    Raises:
        Exception: when the test name cannot be derived from the path, the
            test descriptor cannot be loaded, or the framework is unknown.
    """
    # parse test_name from test directory path (last non-empty component)
    test_path = test.split("/")
    test_name = None
    for t in test_path[::-1]:
        if len(t) != 0:
            test_name = t
            break
    if not test_name:
        raise Exception("Could not parse test_name from {}".format(test))

    print_color(LogColors.GREEN, "## Running {} test {}".format(framework, test_name))

    msg = ""

    try:
        with open(os.path.join(test, "test.json"), "r") as f:
            test_desc = json.load(f)
    except Exception:
        raise Exception(
            "Could not load or parse test from {}".format(
                os.path.join(test, "test.json")
            )
        )

    # Honour the per-test framework exclusion list unless overridden.
    try:
        if not args.override_exclusions:
            for excl in test_desc["framework_exclusions"]:
                if excl == framework:
                    print_color(LogColors.GREEN, "Results SKIPPED")
                    return (TestResult.SKIPPED, 0.0, "")
    except KeyError:
        # No exclusion list in the descriptor.
        pass

    tf_tools_dir = os.path.abspath(
        "{}/bazel-bin/tensorflow/compiler/mlir".format(args.tf_base_dir)
    )

    pre_opt_filename = os.path.join(test, "test_{}.preopt.mlir".format(framework))
    post_opt_filename = os.path.join(test, "test_{}.postopt.mlir".format(framework))
    if args.test_dir:
        test_path_prepend = args.test_dir
    else:
        test_path_prepend = test

    # 1. Framework to MLIR translator command. Tests that already ship a
    # .mlir model skip the translation step entirely.
    if framework == "tf":
        if test_desc["tf_model_filename"].endswith(".mlir"):
            pre_opt_filename = test_desc["tf_model_filename"]
            translate_mlir_cmd = []
        else:
            translate_mlir_cmd = [
                os.path.join(tf_tools_dir, "tf-mlir-translate"),
                "--graphdef-to-mlir",
                "--tf-enable-shape-inference-on-import",
                "--tf-output-arrays={}".format(test_desc["tf_result_name"]),
                os.path.join(test_path_prepend, test_desc["tf_model_filename"]),
                "-o",
                pre_opt_filename,
            ]
    elif framework == "tflite":
        if test_desc["tflite_model_filename"].endswith(".mlir"):
            pre_opt_filename = test_desc["tflite_model_filename"]
            translate_mlir_cmd = []
        else:
            translate_mlir_cmd = [
                os.path.join(tf_tools_dir, "lite", "flatbuffer_translate"),
                "--tflite-flatbuffer-to-mlir",
                os.path.join(test_path_prepend, test_desc["tflite_model_filename"]),
                "--output-arrays={}".format(test_desc["tflite_result_name"]),
                "-o",
                pre_opt_filename,
            ]
    else:
        # Fix: error message previously read "framwork"
        raise Exception("Unknown framework: {}".format(framework))

    # Any additional inputs to the translator?
    input_tensor_prefix = "TosaInput_"
    flatbuffer_dir = "flatbuffer-{}".format(framework)
    mlir_opts = []

    # Temporary hack: MLIR's new hex encoding of large tensors does not work for
    # boolean types
    # for TF hash 8e8041d594a888eb67eafa5cc62627d7e9ca8082
    if test.endswith("_bool") and args.hex_bool_hack:
        mlir_opts.append("--mlir-print-elementsattrs-with-hex-if-larger=-1")

    # Fix: initialize before the try block so the name is always bound even
    # when the KeyError path (no "ifm_file"/"ifm_shape" keys) is taken.
    reference_runner_ifm_name = []
    try:
        # specify input tensors if test is generated from .pb
        if framework == "tf":
            # Convert the shape to a mlir-friendly string
            shapes = []
            for curr_shape in test_desc["ifm_shape"]:
                shape_str = ""
                for dim in curr_shape:
                    shape_str = shape_str + str(dim) + ","
                shapes.append(shape_str)

            translate_mlir_cmd.extend(
                ["--tf-input-arrays", ",".join(test_desc["ifm_name"])]
            )
            translate_mlir_cmd.extend(["--tf-input-shapes", ":".join(shapes)])

        # Write the hard-coded placeholder input (reshaped as necessary) to
        # the file that compiler specified.
        for i in range(len(test_desc["ifm_file"])):
            ifm_tensor_name = "{}{}".format(input_tensor_prefix, i)

            assert test_desc["ifm_file"][i].endswith(".npy")
            ifm_np = np.load(os.path.join(test, test_desc["ifm_file"][i]))
            # Make sure input numpy and input shape from descriptor match
            assert list(ifm_np.shape) == test_desc["ifm_shape"][i]

            reference_runner_ifm_name.append(ifm_tensor_name)

    except KeyError:
        # No additional inputs. Ignore.
        pass

    tf_opt_cmd = [
        os.path.join(tf_tools_dir, "tf-opt"),
        "--tf-executor-to-functional-conversion",
        "--verify-each",
        pre_opt_filename,
        "-o",
        post_opt_filename,
    ]

    translate_mlir_cmd.extend(mlir_opts)
    tf_opt_cmd.extend(mlir_opts)

    compiler_cmd = [os.path.join(tf_tools_dir, "tf-opt")]

    if framework == "tf":
        compiler_cmd.append("--tf-to-tosa-pipeline")
    elif framework == "tflite":
        compiler_cmd.append("--tfl-to-tosa-pipeline")
        compiler_cmd.append("--tosa-strip-quant-types")

    tosa_mlir_filename = os.path.join(test, "output_{}.tosa.mlir".format(framework))

    flatbuffer_dir_fullpath = os.path.join(test, flatbuffer_dir)

    os.makedirs(flatbuffer_dir_fullpath, exist_ok=True)

    compiler_cmd.extend(
        [
            "--verify-each",
            post_opt_filename,
            "-o",
            tosa_mlir_filename,
            "--tosa-serialize",
            "--tosa-flatbuffer-filename={}".format(
                os.path.join(flatbuffer_dir_fullpath, "{}.tosa".format(test_name))
            ),
        ]
    )

    if not args.no_compiler:
        try:
            if translate_mlir_cmd:
                run_sh_command(translate_mlir_cmd, args.verbose, True)
            if tf_opt_cmd:
                run_sh_command(tf_opt_cmd, args.verbose, True)
        except Exception as e:
            print_color(
                LogColors.RED, "Results INVALID_MLIR {}: {}".format(test_name, e)
            )
            return (TestResult.INVALID_MLIR, 0.0, e)

        try:
            compiler_stdout, compiler_stderr = run_sh_command(
                compiler_cmd, args.verbose, True
            )
            compiler_rc = parse_compiler_output(compiler_stdout, compiler_stderr)
            if compiler_rc == TestResult.NOT_LOWERED:
                print_color(
                    LogColors.RED,
                    "Results NOT_LOWERED {}, framework {}".format(test_name, framework),
                )
                return (TestResult.NOT_LOWERED, 0.0, "")

        except Exception as e:
            if "same scale constraint" in str(e):
                print_color(
                    LogColors.RED, "Results INVALID_MLIR {}: {}".format(test_name, e)
                )
                return (TestResult.INVALID_MLIR, 0.0, e)
            else:
                print_color(
                    LogColors.RED, "Results COMPILER_ERROR {}: {}".format(test_name, e)
                )
                return (TestResult.COMPILER_ERROR, 0.0, e)

    # Load the framework-produced golden result for comparison.
    if framework == "tf":
        try:
            tf_result = np.load(os.path.join(test, test_desc["tf_result_npy_filename"]))
        except KeyError:
            assert 0, "fail to load tf result numpy"
    elif framework == "tflite":
        try:
            tf_result = np.load(
                os.path.join(test, test_desc["tflite_result_npy_filename"])
            )
        except KeyError:
            assert 0, "fail to load tflite result numpy"

    # Generate test descriptor per flatbuffer generation
    # Input .npy will be shared across different frameworks
    # Output .npy will be generated in its corresponding flatbuffer
    reference_runner_ifm_file = [
        os.path.join("..", ifm_file) for ifm_file in test_desc["ifm_file"]
    ]

    # Check if there's any operator in output graph.
    empty_graph = True
    with open(tosa_mlir_filename, "r") as f:
        for line in f:
            if re.search('"tosa.*"', line):
                empty_graph = False

                break

    # Fast-forward input tensor to output tensor if TOSA graph is empty.
    if empty_graph:
        reference_runner_ofm_name = reference_runner_ifm_name
    else:
        reference_runner_ofm_name = ["TosaOutput_0"]

    write_reference_runner_json(
        filename=os.path.join(test, flatbuffer_dir, "desc.json"),
        tosa_filename="{}.tosa".format(test_name),
        ifm_name=reference_runner_ifm_name,
        ifm_file=reference_runner_ifm_file,
        ofm_name=reference_runner_ofm_name,
        ofm_file=["ref_model_output_0.npy"],
    )

    ref_model_cmd = [
        os.path.join(
            args.tools_base_dir, "build", "reference_model", "tosa_reference_model"
        ),
        "-Ctest_desc={}".format(os.path.join(test, flatbuffer_dir, "desc.json")),
    ]

    if args.debug_ref_model:
        ref_model_cmd.extend(["-DALL", "-lhigh"])

    if args.valgrind:
        ref_model_cmd = [
            "valgrind",
            "--show-leak-kinds=all",
            "--log-fd=1",
            "-q",
        ] + ref_model_cmd

    # Clean out any ref_model result first.
    # Fix: os.remove() does not expand wildcards, so the previous
    # os.remove(".../ref_model_*.npy") call always raised FileNotFoundError
    # and stale results were never deleted. Expand the pattern explicitly.
    for old_result in glob.glob(os.path.join(test, flatbuffer_dir, "ref_model_*.npy")):
        try:
            os.remove(old_result)
        except FileNotFoundError:
            pass

    if not args.no_ref:
        try:
            ref_model_stdout, ref_model_stderr = run_sh_command(
                ref_model_cmd, args.verbose, True
            )
            ref_model_rc = parse_reference_model_output(
                ref_model_stdout, ref_model_stderr
            )
            if ref_model_rc != TestResult.PASS:
                return (ref_model_rc, 0.0, "")
        except Exception as e:
            ref_model_rc = parse_reference_model_output("", str(e))
            if ref_model_rc != TestResult.PASS:
                print_color(
                    LogColors.RED,
                    "Results {} {}: {}".format(
                        TestResultErrorStr[ref_model_rc], test_name, e
                    ),
                )
                return (ref_model_rc, 0.0, "")
            print_color(
                LogColors.RED,
                "Results REF_MODEL_RUNTIME_ERROR {}: {}".format(test_name, e),
            )
            return (TestResult.REF_MODEL_RUNTIME_ERROR, 0.0, e)

    # Normalize dtypes so the golden result matches what the reference model
    # emits (float32 for float16, int32 for the narrow/wide integer types).
    if tf_result.dtype == np.float16:
        tf_result = tf_result.astype(np.float32)
    elif (
        tf_result.dtype == np.uint8
        or tf_result.dtype == np.int8
        or tf_result.dtype == np.int16
        or tf_result.dtype == np.int64
    ):
        tf_result = tf_result.astype(np.int32)

    # For now, search for the output from ref_model
    ref_model_result_files = glob.glob(
        os.path.join(test, flatbuffer_dir, "ref_model_*.npy")
    )
    ref_model_result = np.load(ref_model_result_files[0])

    assert (
        tf_result.dtype == ref_model_result.dtype
    ), "Numpy type mismatch {} != {} when comparing result".format(
        tf_result.dtype, ref_model_result.dtype
    )

    # Size comparison
    # Size = 1 tensors can be equivalently represented as having rank 0 or rank
    # >= 0, allow that special case
    tf_result = np.squeeze(tf_result)
    ref_model_result = np.squeeze(ref_model_result)

    if np.shape(tf_result) != np.shape(ref_model_result):
        print_color(LogColors.RED, "Results MISCOMPARE {}".format(test_name))
        msg = "Shapes mismatch: Reference {} vs {}".format(
            np.shape(tf_result), np.shape(ref_model_result)
        )
        print(msg)
        return (TestResult.MISMATCH, 0.0, msg)

    # for quantized test, allow +-(args.quantize_tolerance) error
    if ref_model_result.dtype == np.int32:
        assert tf_result.dtype == np.int32

        if np.all(np.absolute(ref_model_result - tf_result) <= args.quantize_tolerance):
            print_color(LogColors.GREEN, "Results PASS {}".format(test_name))
        else:
            print_color(LogColors.RED, "Results MISCOMPARE {}".format(test_name))

            # Widen the tolerance until the comparison passes (capped at 10)
            # to report how far off the result was.
            # Fix: the loop previously compared against the constant
            # args.quantize_tolerance instead of the growening 'tolerance',
            # so it always counted up to the cap regardless of the data.
            tolerance = args.quantize_tolerance + 1
            while not np.all(
                np.absolute(ref_model_result - tf_result) <= tolerance
            ):
                tolerance = tolerance + 1
                if tolerance >= 10:
                    break

            msg = "Result is within {} {}".format(tolerance, test_name)
            print(msg)

            np.set_printoptions(threshold=128)
            print("tf_result: {}\n".format(tf_result.shape))
            print(tf_result)
            print("ref_model_result: {}\n".format(ref_model_result.shape))
            print(ref_model_result)
            return (TestResult.MISMATCH, tolerance, msg)
    else:
        if np.allclose(
            ref_model_result, tf_result, atol=args.tolerance, equal_nan=True
        ):
            print_color(LogColors.GREEN, "Results PASS {}".format(test_name))
        else:
            print_color(LogColors.RED, "Results MISCOMPARE {}".format(test_name))

            # Many of these tests would match with a reasonable looser tolerance.
            # Determine what would have worked.
            tolerance = args.tolerance * 10.0
            while not np.allclose(
                ref_model_result, tf_result, atol=tolerance, equal_nan=True
            ):
                tolerance = tolerance * 10.0
                if tolerance > 1.0e10:
                    tolerance = math.inf
                    break

            msg = "Result is within {:.0e} {}".format(tolerance, test_name)
            print(msg)

            np.set_printoptions(precision=4, threshold=128)
            print("tf_result: {}\n".format(tf_result.shape))
            print(tf_result)
            print("ref_model_result: {}\n".format(ref_model_result.shape))
            print(ref_model_result)
            return (TestResult.MISMATCH, tolerance, msg)

    return (TestResult.PASS, args.tolerance, msg)
646
647
def worker_thread(task_queue, args, result_queue):
    """Worker loop: pull (test, framework) pairs and run them.

    Drains task_queue until it is empty (or a None sentinel is seen), runs
    each test via run_test(), and pushes
    (test, framework, rc, tolerance, msg, duration) tuples onto result_queue.
    Any exception escaping run_test() is recorded as INTERNAL_ERROR so one
    bad test cannot kill the worker.

    Returns:
        True when the queue has been drained.
    """
    while True:
        try:
            (test, framework) = task_queue.get(block=False)
        except queue.Empty:
            break

        if test is None:
            break

        msg = ""
        start_time = datetime.now()
        try:
            (rc, tolerance, msg) = run_test(args, test, framework)
        except Exception as e:
            print("Internal regression error: {}".format(e))
            # Fix: the etype=/value=/tb= keywords were removed from
            # traceback.format_exception() in Python 3.10; the positional
            # form works on all supported versions.
            print(
                "".join(
                    traceback.format_exception(type(e), e, e.__traceback__)
                )
            )
            rc = TestResult.INTERNAL_ERROR
            tolerance = 0.0

        end_time = datetime.now()

        result_queue.put((test, framework, rc, tolerance, msg, end_time - start_time))
        task_queue.task_done()

    return True
680
681
def getTestsInDir(directory):
    """Recursively collect test directories below *directory*.

    A directory containing a test.json file is itself a test and is returned
    without descending further; otherwise every child entry is searched.
    Non-directories yield no tests.
    """
    if os.path.isfile(os.path.join(directory, "test.json")):
        return [directory]
    if not os.path.isdir(directory):
        return []

    found = []
    for entry in glob.glob(os.path.join(directory, "*")):
        found += getTestsInDir(entry)
    return found
693
694
def main():
    """Entry point: run all requested tests and write an xunit report.

    Builds a work queue of (test, framework) pairs, fans it out to
    args.jobs worker threads, aggregates the per-test results into counters
    and an xunit XML file, and prints a summary.

    Returns:
        0 on success; 1 when compiler errors, reference-model errors or
        mismatches occurred and --regression-mode is not set.
    """
    args = parse_args()

    set_print_in_color(not args.no_color)

    if args.output_file:
        # Color codes would corrupt a log file.
        set_print_in_color(False)
        sys.stdout = open(args.output_file, "w")

    # Disable TF info messages
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    task_queue = queue.Queue()
    result_queue = queue.Queue()

    threads = []

    # Result counters for each of the TestResult return codes
    results = [0] * len(TestResult)

    for tdir in args.test:

        if args.recursive_tests:
            tdirList = getTestsInDir(tdir)
        else:
            tdirList = [tdir]

        for t in tdirList:
            for f in args.framework:
                task_queue.put((t, f))

    for i in range(args.jobs):
        t = threading.Thread(
            target=worker_thread, args=(task_queue, args, result_queue)
        )
        # Fix: Thread.setDaemon() is deprecated since Python 3.10;
        # assign the daemon attribute directly.
        t.daemon = True
        t.start()
        threads.append(t)

    # Run until queue is empty
    task_queue.join()

    print_color(LogColors.BOLD_WHITE, "Result summary")

    result_list = []
    while True:
        try:
            test, framework, rc, tol, msg, time_delta = result_queue.get(block=False)
        except queue.Empty:
            break

        result_list.append((test, framework, rc, tol, msg, time_delta))
        results[rc] = results[rc] + 1

    xunit_result = xunit_results()
    xunit_suite = xunit_result.create_suite(args.xunit_classname_prefix)

    # Sort by test name
    for test, framework, rc, tol, err_msg, time_delta in sorted(
        result_list, key=lambda tup: tup[0]
    ):

        test_name = os.path.basename(test)
        class_name = f"{args.xunit_classname_prefix}.{framework}"

        xt = xunit_test(test_name, class_name)

        msg = TestResultErrorStr[rc]

        xt.time = str(
            float(time_delta.seconds) + (float(time_delta.microseconds) * 1e-6)
        )

        if len(msg) > 0:
            print("{} on {} {}".format(msg, framework, test))

        # Add any more verbose messaging for the xml log
        if err_msg:
            msg = "{} {}".format(msg, err_msg)

        if rc == TestResult.PASS:
            pass
        elif rc == TestResult.SKIPPED:
            xt.skipped()
        else:
            xt.failed(msg)

        xunit_suite.tests.append(xt)

        result_queue.task_done()

    xunit_result.write_results(args.xunit_file)

    print("Totals: ", end="")
    for result in TestResult:
        print("{} {}, ".format(results[result], result.name.lower()), end="")
    print()

    if not args.regression_mode and (
        results[TestResult.COMPILER_ERROR] > 0
        or results[TestResult.REF_MODEL_ERROR] > 0
        or results[TestResult.MISMATCH] > 0
    ):
        return 1

    return 0
801
802
if __name__ == "__main__":
    # sys.exit() is the supported API; the bare exit() builtin is a
    # site-module convenience intended for interactive sessions and may be
    # absent (e.g. under python -S).
    sys.exit(main())