blob: c057b7307af61650a834e3eebf3295a328c570f0 [file] [log] [blame]
Jeremy Johnson0ecfa372022-06-30 14:27:56 +01001#!/usr/bin/env python3
2# Copyright (c) 2021-2022, ARM Limited.
3# SPDX-License-Identifier: Apache-2.0
4"""Build conformance tests.
5
6Steps:
7- Specific input shapes (or tests) are specified and produced by using the
8 settings in the .json files.
9- Tests are selected to produce a good coverage.
10- Tests are run on the reference model to produce the correct output files.
11- Tests are converted into JSON format and saved to desired output directory.
12"""
13import argparse
14import json
15import logging
16import multiprocessing as mp
17import os
18import shlex
19import shutil
20import subprocess
21from functools import partial
22from itertools import tee
23from pathlib import Path
24
25from conformance.test_select import Operator
26from convert2conformance.convert2conformance import main as c2c_main
27from distutils.dir_util import copy_tree
28
# Default logging setup; the effective level is raised/lowered in main()
# from the -v verbosity count.
logging.basicConfig()
logger = logging.getLogger("tosa_verif_conformance_generator")

# Configuration for each TOSA profile
PROFILE_OPS_INFO = {
    "tosa-bi": {
        # JSON file (in --test-param-json-directory) describing per-operator
        # generator arguments and test selection parameters
        "operator_test_params": "tosa_base_profile_ops_info.json",
        # JSON file describing which pre-built framework tests to pick
        "framework_tests": "tosa_base_profile_framework_ops_info.json",
        # Tests whose names contain these data-type keywords are excluded
        # from this profile
        "exclude_types": ["float"],
    }
}

# Reference model binary location, relative to --ref-model-directory
LOCATION_REF_MODEL_BINARY = Path("build/reference_model/tosa_reference_model")
42
43
class GenConformanceError(Exception):
    """Generation error reporting exception."""
48
49
50def _run_sh_command(args, cwd, full_cmd):
51 """Run an external command and capture stdout/stderr."""
52 # Quote the command line for printing
53 full_cmd_esc = [shlex.quote(x) for x in full_cmd]
54 if args.capture_output:
55 logger.debug(f"Command: {full_cmd_esc}")
56
57 rc = subprocess.run(
58 full_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd
59 )
60
61 if args.capture_output:
62 stdout = rc.stdout.decode("utf-8")
63 logger.debug(f"stdout: \n{stdout}")
64 if rc.returncode != 0:
65
66 raise Exception(
67 "Error running command: {}.\n{}".format(
68 " ".join(full_cmd_esc), rc.stderr.decode("utf-8")
69 )
70 )
71 return (rc.stdout, rc.stderr)
72
73
def build_op_tests(args, operator, test_params):
    """Build tests for a given operator.

    Builds a set of tests based on the parameters defined in test_params

    Returns operator output directory
    """
    assert operator in test_params

    op_build_dir = args.build_dir

    # Arguments common to every invocation of the test generator.
    base_cmd = [
        "tosa_verif_build_tests",
        "--filter",
        operator,
        "-o",
        str(op_build_dir),
        "--seed",
        "42",
    ]

    commands = []

    if args.test_type in ("positive", "both"):
        # One generator invocation per parameter set from the ops-info JSON.
        for extra_args in test_params[operator]["generator_args"]:
            commands.append(base_cmd + list(extra_args))

    if args.test_type in ("negative", "both"):
        commands.append(base_cmd + ["--test-type", "negative"])

    logger.debug(f"Creating {operator} tests with {len(commands)} parameter(s)")
    had_error = False
    for index, command in enumerate(commands):
        try:
            _run_sh_command(args, args.ref_model_dir.absolute(), command)
            logger.info(
                f"{operator} test batch {(index+1)}/{len(commands)} created successfully"
            )
        except Exception as e:
            # Log and carry on so all batches are attempted before failing.
            logger.error(
                f"{operator} test batch {(index+1)}/{len(commands)} unsuccessful, skipping"
            )
            logger.error(f" build_op_tests error: {e} ")
            had_error = True
    if had_error:
        raise GenConformanceError()

    return op_build_dir
128
129
def _check_to_include_test(profile, test_name, exclude_negative_tests=False):
    """Check test name for exclusions, return False to indicate excluded."""
    # The profile supplies data-type keywords that must not appear in a
    # test name; negative (ERRORIF) tests are excluded only on request.
    exclusions = list(PROFILE_OPS_INFO[profile]["exclude_types"])
    if exclude_negative_tests:
        exclusions.append("ERRORIF")

    return all(f"_{keyword}_" not in test_name for keyword in exclusions)
139
140
141def _get_all_tests_list(
142 profile, test_root_dir, operator, exclude_negative_tests=False, include_all=False
143):
144 """Create test list based on tests in the test_dir."""
145 test_dir = test_root_dir / operator
146 if not test_dir.is_dir():
147 # Tests are split into multiple dirs, for example: conv2d_1x1, conv2d_3x3
148 test_dir = test_root_dir
149 directories = [
150 tdir for tdir in test_dir.glob("*") if tdir.name.startswith(operator)
151 ]
152 else:
153 directories = [test_dir]
154
155 tests = []
156 for tdir in directories:
157 tests.extend(
158 [
159 test
160 for test in tdir.glob("*")
161 if include_all
162 or _check_to_include_test(profile, test.name, exclude_negative_tests)
163 ]
164 )
165 return tests
166
167
def generate_results(args, operator, op_build_dir, tests=None):
    """Run tests on reference model and save result to the test directory.

    args: parsed command line arguments (uses ref_model_dir, num_cores
        and profile)
    operator: name of the operator under test
    op_build_dir: directory containing the generated tests
    tests: optional iterable of test directories; when omitted, all
        positive tests for the operator found in op_build_dir are run
    """
    run_tests_cmd = "tosa_verif_run_tests"

    ref_model_path = args.ref_model_dir / LOCATION_REF_MODEL_BINARY
    # Fix: removed the dead chained assignment (ref_cmd_base = ref_cmd = [...])
    # present in the original - only ref_cmd_base is needed here.
    ref_cmd_base = [
        run_tests_cmd,
        "--ref-model-path",
        str(ref_model_path.absolute()),
        "-j",
        str(args.num_cores),
        "-v",
        "-t",
    ]
    ref_cmds = []

    if not tests:
        # Do not need to run ERRORIF tests as they don't have result files
        tests = _get_all_tests_list(
            args.profile, op_build_dir, operator, exclude_negative_tests=True
        )

    for test in tests:
        ref_cmd = ref_cmd_base.copy()
        ref_cmd.append(str(test))
        ref_cmds.append(ref_cmd)

    fail_string = "UNEXPECTED_FAILURE"
    failed_counter = 0

    # Run the reference model over all tests in parallel worker processes.
    job_pool = mp.Pool(args.num_cores)
    sh_partial = partial(_run_sh_command, args, args.ref_model_dir.absolute())
    pool_results = job_pool.map(sh_partial, ref_cmds)
    job_pool.close()
    job_pool.join()

    # Use captured output for run_sh_command to work out if test passed.
    for i, rc in enumerate(pool_results):
        if fail_string in str(rc[0]):
            logger.error(f"Test {i+1}/{len(ref_cmds)}: {ref_cmds[i][-1]} failed.")
            failed_counter += 1
        else:
            logger.info(f"Test {i+1}/{len(ref_cmds)}: {ref_cmds[i][-1]} passed.")

    logger.info(f"{len(ref_cmds)-failed_counter}/{len(ref_cmds)} tests passed")
    logger.info("Ran tests on model and saved results of passing tests")
215
216
def convert_tests(
    args,
    operator,
    op_build_dir,
    output_dir,
    profiles,
    tests=None,
    group=None,
    trim_op_subdir=False,
):
    """Convert tests to JSON and save to output directory.

    args: parsed command line arguments (uses ref_model_dir,
        framework_schema, profile and num_cores)
    operator: name of the operator the tests belong to
    op_build_dir: directory holding the generated tests
    output_dir: root directory for the converted (JSON) tests
    profiles: list of profile names passed through to convert2conformance
    tests: optional explicit list of test paths; when omitted, all tests
        for the operator found in op_build_dir are converted
    group: optional grouping sub-directory appended to output_dir
    trim_op_subdir: when True, the operator sub-directory is not recreated
        under the output location

    Returns the output directory used, or None when no tests were found.
    Raises GenConformanceError if any test fails to convert.
    """
    ref_model_dir = args.ref_model_dir

    if group:
        output_dir = output_dir / group

    # Build the argument list common to every convert2conformance call.
    ref_cmd_base = ["--ref-model-directory", str(ref_model_dir)]
    for profile in profiles:
        ref_cmd_base.extend(["--profile", profile])
    if args.framework_schema:
        ref_cmd_base.extend(["--framework-schema", str(args.framework_schema)])
    ref_cmd_base.append("--output-directory")

    ref_cmds = []

    if not tests:
        tests = _get_all_tests_list(args.profile, op_build_dir, operator)
        logger.info(f"Converting all {args.profile} profile tests")

    # Controls if we copy the tests in their operator sub-directory or not
    output_dir_relative_pos = -1 if trim_op_subdir else -2
    for test in tests:
        logger.info(f"Test chosen: {test}")
        ref_cmd = ref_cmd_base.copy()
        # Keep the last 1 or 2 path components (test name, optionally with
        # its operator sub-directory) and re-root them under output_dir.
        full_output_directory = output_dir / test.relative_to(
            *test.parts[:output_dir_relative_pos]
        )
        ref_cmd.append(str(full_output_directory))
        ref_cmd.append(str(test))
        ref_cmds.append(ref_cmd)

    if len(ref_cmds) == 0:
        logger.warning("No tests found. Nothing to convert")
        return

    # Run the conversions in parallel; c2c_main is the convert2conformance
    # entry point and returns a non-zero value on failure.
    job_pool = mp.Pool(args.num_cores)

    pool_results = job_pool.map(c2c_main, ref_cmds)
    job_pool.close()
    job_pool.join()

    failed_counter = 0
    for i, result in enumerate(pool_results):
        if result != 0:
            logger.error(
                f"test {i+1}/{len(ref_cmds)}: {ref_cmds[i][-1]} failed to convert."
            )
            failed_counter += 1
        else:
            logger.info(f"test {i+1}/{len(ref_cmds)}: {ref_cmds[i][-1]} converted")
    logger.info(
        f"{len(ref_cmds)-failed_counter}/{len(ref_cmds)} tests successfully converted"
    )

    if failed_counter > 0:
        logger.error(f"Stopping due to {failed_counter} test conversion errors")
        raise (GenConformanceError())

    logger.info("Converted tests to JSON and saved to output directory")

    return output_dir
288
289
def get_op_tests_selection(args, operator, op_build_dir, test_params, negative=False):
    """Use test picker to get subsection of tests generated."""
    assert operator in test_params
    try:
        # Look up the operator's selector class and instantiate it with the
        # selection parameters from the ops-info JSON.
        selector = Operator.registry[operator](
            op_build_dir,
            test_params[operator],
            negative,
            exclude_types=PROFILE_OPS_INFO[args.profile]["exclude_types"],
        )
    except KeyError:
        logger.error(f"{operator} operator is not supported by test_select")
        raise GenConformanceError()

    return selector.select_tests()
306
307
def check_op_tests(args, operator, output_dir, max_file_size_mb=30):
    """Move test folders that contain files larger than max_file_size_mb
    to a separate "<output_dir>_large_files" directory.

    args: parsed command line arguments (uses output_dir and profile)
    operator: name of the operator whose tests are checked
    output_dir: directory containing the converted tests
    max_file_size_mb: size threshold in MiB (default 30, matching the
        previous hard-coded limit)

    Raises GenConformanceError if no tests are found to check.
    """
    # Use a real Path so the / operator below is unambiguous.
    destination_dir = Path(str(args.output_dir) + "_large_files")

    tests = _get_all_tests_list(args.profile, output_dir, operator, include_all=True)
    if not tests:
        logger.error(
            f"Couldn't find any tests to size check for {operator} in {output_dir}"
        )
        raise GenConformanceError()

    for tdir in tests:
        # Move the whole test directory if any single file exceeds the limit.
        move_dir = any(
            os.stat(file).st_size / 1024**2 > max_file_size_mb
            for file in tdir.glob("*")
        )

        if move_dir:
            move_destination = destination_dir / tdir.relative_to(output_dir)
            logger.warning(
                f"{tdir.relative_to(output_dir)} contains files that are too large (>{max_file_size_mb}MB), test moved to new folder: {destination_dir}"
            )

            if move_destination.is_dir():
                logger.warning(
                    f"{move_destination} directory already exists, deleting existing."
                )
                shutil.rmtree(str(move_destination))
            shutil.move(str(tdir), move_destination)
339
340
def copy_rename_framework_tests(args, operator, test_picks):
    """Copy framework tests into new folder and rename them if needed.

    The tests are renamed to match the framework operator names if an
    alternate name has been used instead.

    args: parsed command line arguments (uses framework_tests_dir and
        build_dir)
    operator: TOSA operator name the tests are copied/renamed to
    test_picks: framework test selection data loaded from JSON

    Returns the parent of the per-operator destination directory.
    """
    framework_tests_dir = args.framework_tests_dir
    new_tests_dir = args.build_dir / "frameworks" / operator
    os.makedirs(new_tests_dir, exist_ok=True)

    # Get the framework tests operator name
    if "alternate_names" in test_picks[operator]:
        alternate_names = test_picks[operator]["alternate_names"]
    else:
        alternate_names = [operator]

    # Fix: count all copied test folders across every alternate name
    # (previously only the last alternate name's count was logged).
    copied_count = 0

    # Get the alternate named test directories for the operator
    for alt_name in alternate_names:
        test_prefix = f"test_{alt_name}"
        test_dirs = list(framework_tests_dir.glob(f"{test_prefix}_*"))

        # Copy tests to new directory and rename to match framework operator names
        # - if there is just 1 alternate name, replace the full test prefix
        #   test_add_... -> add_...
        # - if there are multiple alternate names, just replace the "test"
        #   test_concatv2_... -> concatenation_concatv2_...
        old_prefix = test_prefix if len(alternate_names) == 1 else "test"

        for tdir in test_dirs:
            new_test_name = tdir.name.replace(old_prefix, operator)
            copy_destination = new_tests_dir / new_test_name
            logger.debug(f"copying test folder {tdir} to {copy_destination}")
            # Fix: distutils is deprecated (removed in Python 3.12);
            # shutil.copytree with dirs_exist_ok=True provides the same
            # merge-into-existing-directory behaviour as copy_tree.
            shutil.copytree(str(tdir), str(copy_destination), dirs_exist_ok=True)

        copied_count += len(test_dirs)

    logger.info(f"Copied and renamed {copied_count} framework test folders")
    return new_tests_dir.parent
377
378
def get_framework_tests_selection(args, operator, test_picks, op_build_dir):
    """Get the list of pre-chosen tests with relative paths."""
    try:
        chosen_tests = test_picks[operator]["tests"]
    except KeyError:
        logger.error(f"Framework test selection not defined for {operator} operator")
        raise GenConformanceError()

    return [op_build_dir / operator / name for name in chosen_tests]
389
390
def parse_args(argv=None):
    """Parse the arguments.

    argv: optional list of argument strings (defaults to sys.argv[1:]).
    Returns the parsed argparse.Namespace.
    """
    parser = argparse.ArgumentParser()
    # Profile choices come straight from the PROFILE_OPS_INFO config;
    # the first entry is the default.
    profiles = list(PROFILE_OPS_INFO.keys())
    parser.add_argument(
        "--profile",
        dest="profile",
        choices=profiles,
        default=profiles[0],
        type=str,
        help=f"TOSA profile (default is {profiles[0]})",
    )
    parser.add_argument(
        "--operators",
        type=str,
        nargs="*",
        help="The operator(s) to create tests for, if not supplied all tests will be created",
    )
    parser.add_argument(
        "--unit-tests",
        dest="unit_tests",
        choices=["operator", "framework", "both"],
        default="operator",
        type=str,
        help="Which unit tests are produced (default is operator)",
    )
    parser.add_argument(
        "--test-type",
        dest="test_type",
        choices=["positive", "negative", "both"],
        default="both",
        type=str,
        help="Type of tests produced (default is both)",
    )
    # Input directories.
    parser.add_argument(
        "--ref-model-directory",
        dest="ref_model_dir",
        type=Path,
        required=True,
        help="Reference Model directory (must be pre-built)",
    )
    parser.add_argument(
        "--framework-tests-directory",
        dest="framework_tests_dir",
        type=Path,
        default=Path.cwd() / "tests",
        help="The pre-built framework tests directory (default is tests)",
    )
    parser.add_argument(
        "--framework-schema",
        dest="framework_schema",
        type=Path,
        help="Framework flatbuffers schema needed to convert framework models",
    )
    # Working and output directories.
    parser.add_argument(
        "--build-directory",
        dest="build_dir",
        type=Path,
        default=Path.cwd() / "conformance_build",
        help="Temporary build directory for files created during this process (default is conformance_build)",
    )
    parser.add_argument(
        "--output-directory",
        dest="output_dir",
        type=Path,
        default=Path.cwd() / "conformance",
        help="Output directory (default is conformance)",
    )
    # The ops-info JSON files live next to this script by default.
    script_dir = Path(__file__).parent.absolute()
    parser.add_argument(
        "--test-param-json-directory",
        dest="param_json_dir",
        type=Path,
        default=script_dir,
        help=f"Test parameters (ops info) JSON file directory (default is {script_dir})",
    )
    parser.add_argument(
        "--convert-all-tests",
        action="store_true",
        help="Converts all tests instead of those picked by test_select",
    )
    parser.add_argument(
        "--keep-large-files",
        action="store_true",
        help="Keeps tests that contain files larger than 30MB in output directory",
    )
    parser.add_argument(
        "--capture-output",
        action="store_true",
        help="Prints output of running sh commands",
    )
    parser.add_argument(
        "-j",
        dest="num_cores",
        type=int,
        default=6,
        help="Number of simultaneous jobs to split the tasks into for multiprocessing",
    )
    parser.add_argument(
        "-v",
        dest="verbosity",
        action="count",
        default=0,
        help="Verbosity (can be used multiple times for more details)",
    )
    args = parser.parse_args(argv)

    return args
499
500
def main():
    """Generate conformance tests.

    Returns 0 on success, 1 on a generation/conversion failure and 2 on
    an invalid command line setup (missing directories or schema).
    """
    args = parse_args()

    # Validate the reference model directory and binary up front.
    if not args.ref_model_dir.is_dir():
        logger.error(
            f"Missing or invalid reference model directory: {args.ref_model_dir}"
        )
        return 2
    else:
        ref_model = args.ref_model_dir / LOCATION_REF_MODEL_BINARY
        if not ref_model.is_file():
            logger.error(
                f"{LOCATION_REF_MODEL_BINARY} not found in {args.ref_model_dir}\nHave you built the reference model?"
            )
            return 2
    # Framework tests additionally need a schema and a tests directory.
    if args.unit_tests in ["framework", "both"]:
        if not args.framework_schema:
            logger.error(
                "Need to supply location of Framework flatbuffers schema via --framework-schema"
            )
            return 2
        if not args.framework_tests_dir.is_dir():
            logger.error(
                f"Missing or invalid framework tests directory: {args.framework_tests_dir}"
            )
            return 2

    # Map -v count to a log level, clamping at DEBUG.
    loglevels = (logging.WARNING, logging.INFO, logging.DEBUG)
    loglevel = loglevels[min(args.verbosity, len(loglevels) - 1)]
    logger.setLevel(loglevel)
    # Set other loggers the same
    logging.getLogger("test_select").setLevel(loglevel)
    logging.getLogger("convert2conformance").setLevel(loglevel)

    print(f"Creating conformance tests for TOSA {args.profile} profile")
    print(f"Output directory: {args.output_dir}")

    args.build_dir = args.build_dir.resolve()
    logger.debug(f"Creating build directory: {args.build_dir}")
    args.build_dir.mkdir(parents=True, exist_ok=True)

    try:
        # Framework unit tests
        if args.unit_tests in ["framework", "both"]:
            logger.debug("Creating FRAMEWORK unit tests")
            test_picks_file = (
                args.param_json_dir / PROFILE_OPS_INFO[args.profile]["framework_tests"]
            )
            try:
                with open(test_picks_file, "r") as fd:
                    test_picks = json.load(fd)
            except Exception as e:
                logger.error(
                    f"Couldn't load framework tests info - {test_picks_file}: {e}"
                )
                return 1

            operators = args.operators
            if not operators:
                # Create tests for all the operators
                operators = list(test_picks.keys())

            root_output_dir = args.output_dir / "frameworks" / "tflite" / "operators"
            for op in operators:
                if op not in test_picks:
                    logger.warning(
                        f"Framework op {op} not found in {test_picks_file} - skipping"
                    )
                    continue

                logger.debug(f"Copying and renaming {op}")
                framework_test_dir = copy_rename_framework_tests(args, op, test_picks)
                profiles = test_picks[op]["profile"]
                if args.convert_all_tests:
                    logger.debug("Running and converting all framework tests")
                    convert_tests(
                        args,
                        op,
                        framework_test_dir,
                        root_output_dir,
                        profiles,
                        trim_op_subdir=True,
                    )
                else:
                    # Only convert the pre-chosen subset of framework tests.
                    framework_tests = get_framework_tests_selection(
                        args, op, test_picks, framework_test_dir
                    )
                    convert_tests(
                        args,
                        op,
                        framework_test_dir,
                        root_output_dir,
                        profiles,
                        tests=framework_tests,
                        trim_op_subdir=True,
                    )

        # Operator unit tests
        if args.unit_tests in ["operator", "both"]:
            logger.debug("Creating OPERATOR unit tests")
            test_params_file = (
                args.param_json_dir
                / PROFILE_OPS_INFO[args.profile]["operator_test_params"]
            )
            try:
                with open(test_params_file, "r") as fd:
                    test_params = json.load(fd)
            except Exception as e:
                logger.error(
                    f"Couldn't load operator test params - {test_params_file}: {e}"
                )
                return 1

            operators = args.operators
            if not operators:
                # Create tests for all the operators
                operators = list(test_params.keys())

            for op in operators:
                if op not in test_params:
                    logger.warning(
                        f"{op} operator parameters not found in {test_params_file} - skipping"
                    )
                    continue

                # Skip operators that declare they have no negative tests
                # when only negative tests were requested.
                if (
                    args.test_type == "negative"
                    and "no_negative_tests" in test_params[op]
                    and test_params[op]["no_negative_tests"]
                ):
                    logger.warning(f"No negative tests for {op}")
                    continue

                op_build_dir = build_op_tests(args, op, test_params)

                operator_group = test_params[op]["group"]
                root_output_dir = args.output_dir / "operators"
                profiles = test_params[op]["profile"]
                if args.convert_all_tests:
                    logger.debug(f"Running and converting all {op} tests")
                    generate_results(args, op, op_build_dir)
                    output_dir = convert_tests(
                        args,
                        op,
                        op_build_dir,
                        root_output_dir,
                        profiles,
                        group=operator_group,
                    )
                else:
                    if args.test_type in ["positive", "both"]:
                        # tee() lets the same selection generator feed both
                        # the results run and the conversion.
                        tests_gen1, tests_gen2 = tee(
                            get_op_tests_selection(args, op, op_build_dir, test_params)
                        )
                        generate_results(args, op, op_build_dir, tests_gen1)
                        output_dir = convert_tests(
                            args,
                            op,
                            op_build_dir,
                            root_output_dir,
                            profiles,
                            tests=tests_gen2,
                            group=operator_group,
                        )
                    if args.test_type in ["negative", "both"] and (
                        "no_negative_tests" not in test_params[op]
                        or not test_params[op]["no_negative_tests"]
                    ):
                        negative_tests = get_op_tests_selection(
                            args, op, op_build_dir, test_params, negative=True
                        )
                        output_dir = convert_tests(
                            args,
                            op,
                            op_build_dir,
                            root_output_dir,
                            profiles,
                            tests=negative_tests,
                            group=operator_group,
                        )
                if not args.keep_large_files:
                    check_op_tests(args, op, output_dir)
    except GenConformanceError:
        return 1

    return 0
687
688
if __name__ == "__main__":
    # Fix: raise SystemExit directly rather than calling exit() - the exit()
    # builtin is a site-module convenience intended for interactive use and
    # is not guaranteed to exist (e.g. under python -S).
    raise SystemExit(main())