Jeremy Johnson | 0ecfa37 | 2022-06-30 14:27:56 +0100 | [diff] [blame^] | 1 | #!/usr/bin/env python3 |
| 2 | # Copyright (c) 2021-2022, ARM Limited. |
| 3 | # SPDX-License-Identifier: Apache-2.0 |
| 4 | """Build conformance tests. |
| 5 | |
| 6 | Steps: |
| 7 | - Specific input shapes (or tests) are specified and produced by using the |
| 8 | settings in the .json files. |
| 9 | - Tests are selected to produce a good coverage. |
| 10 | - Tests are run on the reference model to produce the correct output files. |
| 11 | - Tests are converted into JSON format and saved to desired output directory. |
| 12 | """ |
import argparse
import json
import logging
import multiprocessing as mp
import os
import shlex
import shutil
import subprocess
import sys
from functools import partial
from itertools import tee
from pathlib import Path

from conformance.test_select import Operator
from convert2conformance.convert2conformance import main as c2c_main
from distutils.dir_util import copy_tree
| 28 | |
# Root handler with default formatting; the level is set later in main()
# from the -v verbosity count.
logging.basicConfig()
logger = logging.getLogger("tosa_verif_conformance_generator")

# Configuration for each TOSA profile
PROFILE_OPS_INFO = {
    "base": {
        # JSON file (in --test-param-json-directory) with per-operator
        # generator/selector parameters
        "operator_test_params": "tosa_base_profile_ops_info.json",
        # JSON file listing the pre-chosen framework (TFLite) tests
        "framework_tests": "tosa_base_profile_framework_ops_info.json",
        # Test-name type tokens excluded from this profile (matched as _TYPE_)
        "exclude_types": ["float"],
    }
}

# Reference model binary, relative to --ref-model-directory
LOCATION_REF_MODEL_BINARY = Path("build/reference_model/tosa_reference_model")
| 42 | |
| 43 | |
class GenConformanceError(Exception):
    """Raised when conformance test generation cannot proceed."""
| 48 | |
| 49 | |
| 50 | def _run_sh_command(args, cwd, full_cmd): |
| 51 | """Run an external command and capture stdout/stderr.""" |
| 52 | # Quote the command line for printing |
| 53 | full_cmd_esc = [shlex.quote(x) for x in full_cmd] |
| 54 | if args.capture_output: |
| 55 | logger.debug(f"Command: {full_cmd_esc}") |
| 56 | |
| 57 | rc = subprocess.run( |
| 58 | full_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd |
| 59 | ) |
| 60 | |
| 61 | if args.capture_output: |
| 62 | stdout = rc.stdout.decode("utf-8") |
| 63 | logger.debug(f"stdout: \n{stdout}") |
| 64 | if rc.returncode != 0: |
| 65 | |
| 66 | raise Exception( |
| 67 | "Error running command: {}.\n{}".format( |
| 68 | " ".join(full_cmd_esc), rc.stderr.decode("utf-8") |
| 69 | ) |
| 70 | ) |
| 71 | return (rc.stdout, rc.stderr) |
| 72 | |
| 73 | |
def build_op_tests(args, operator, test_params):
    """Build tests for a given operator.

    Invokes the tosa_verif_build_tests generator once per parameter set
    defined for the operator in test_params.

    Returns operator output directory
    """
    assert operator in test_params

    op_build_dir = args.build_dir

    base_cmd = [
        "tosa_verif_build_tests",
        "--filter",
        operator,
        "-o",
        str(op_build_dir),
        "--seed",
        "42",
    ]

    commands = []

    if args.test_type in ["positive", "both"]:
        # One generator invocation per set of extra generator arguments.
        commands.extend(
            base_cmd + list(extra_args)
            for extra_args in test_params[operator]["generator_args"]
        )

    if args.test_type in ["negative", "both"]:
        commands.append(base_cmd + ["--test-type", "negative"])

    logger.debug(f"Creating {operator} tests with {len(commands)} parameter(s)")
    total = len(commands)
    had_error = False
    for batch, command in enumerate(commands, start=1):
        try:
            _run_sh_command(args, args.ref_model_dir.absolute(), command)
            logger.info(
                f"{operator} test batch {batch}/{total} created successfully"
            )
        except Exception as e:
            logger.error(
                f"{operator} test batch {batch}/{total} unsuccessful, skipping"
            )
            logger.error(f" build_op_tests error: {e} ")
            had_error = True
    if had_error:
        raise GenConformanceError()

    return op_build_dir
| 128 | |
| 129 | |
def _check_to_include_test(profile, test_name, exclude_negative_tests=False):
    """Return True if the named test is not excluded for this profile."""
    exclusions = list(PROFILE_OPS_INFO[profile]["exclude_types"])
    if exclude_negative_tests:
        exclusions.append("ERRORIF")

    # A test is excluded when its name contains any exclusion token
    # delimited by underscores (e.g. "_float_", "_ERRORIF_").
    return all(f"_{token}_" not in test_name for token in exclusions)
| 139 | |
| 140 | |
def _get_all_tests_list(
    profile, test_root_dir, operator, exclude_negative_tests=False, include_all=False
):
    """Collect the list of test directories found for an operator."""
    op_dir = test_root_dir / operator
    if op_dir.is_dir():
        candidate_dirs = [op_dir]
    else:
        # Tests are split into multiple dirs, for example: conv2d_1x1, conv2d_3x3
        candidate_dirs = [
            d for d in test_root_dir.glob("*") if d.name.startswith(operator)
        ]

    selected = []
    for cdir in candidate_dirs:
        for test in cdir.glob("*"):
            if include_all or _check_to_include_test(
                profile, test.name, exclude_negative_tests
            ):
                selected.append(test)
    return selected
| 166 | |
| 167 | |
def generate_results(args, operator, op_build_dir, tests=None):
    """Run tests on reference model and save result to the test directory.

    Args:
        args: parsed command line arguments
        operator: name of the operator under test
        op_build_dir: directory containing the built tests
        tests: optional iterable of test paths; when omitted, all positive
            tests found for the operator are run

    The runner writes result files into each passing test's directory.
    """
    num_cores = args.num_cores
    run_tests_cmd = "tosa_verif_run_tests"

    ref_model_path = args.ref_model_dir / LOCATION_REF_MODEL_BINARY
    # FIX: removed the dead `ref_cmd` alias that was also bound here.
    ref_cmd_base = [
        run_tests_cmd,
        "--ref-model-path",
        str(ref_model_path.absolute()),
        "-j",
        str(num_cores),
        "-v",
        "-t",
    ]

    if not tests:
        # Do not need to run ERRORIF tests as they don't have result files
        tests = _get_all_tests_list(
            args.profile, op_build_dir, operator, exclude_negative_tests=True
        )

    ref_cmds = [ref_cmd_base + [str(test)] for test in tests]

    fail_string = "UNEXPECTED_FAILURE"
    failed_counter = 0

    sh_partial = partial(_run_sh_command, args, args.ref_model_dir.absolute())
    # Context manager ensures the pool is cleaned up even if map() raises;
    # use the num_cores local consistently (was args.num_cores before).
    with mp.Pool(num_cores) as job_pool:
        pool_results = job_pool.map(sh_partial, ref_cmds)

    # Use captured output for run_sh_command to work out if test passed.
    for i, rc in enumerate(pool_results):
        if fail_string in str(rc[0]):
            logger.error(f"Test {i+1}/{len(ref_cmds)}: {ref_cmds[i][-1]} failed.")
            failed_counter += 1
        else:
            logger.info(f"Test {i+1}/{len(ref_cmds)}: {ref_cmds[i][-1]} passed.")

    logger.info(f"{len(ref_cmds)-failed_counter}/{len(ref_cmds)} tests passed")
    logger.info("Ran tests on model and saved results of passing tests")
| 215 | |
| 216 | |
def convert_tests(
    args,
    operator,
    op_build_dir,
    output_dir,
    tests=None,
    group=None,
    trim_op_subdir=False,
):
    """Convert tests to JSON and save to output directory.

    Args:
        args: parsed command line arguments
        operator: operator name the tests belong to
        op_build_dir: directory containing the built tests
        output_dir: root directory to write converted tests into
        tests: optional list of tests; defaults to all tests found
        group: optional operator group sub-directory appended to output_dir
        trim_op_subdir: when True, drop the operator sub-directory level
            when building each test's output path

    Returns:
        The (possibly group-extended) output directory.

    Raises:
        GenConformanceError: if any test failed to convert.
    """
    ref_model_dir = args.ref_model_dir

    if group:
        output_dir = output_dir / group

    ref_cmd_base = ["--ref-model-directory", str(ref_model_dir)]
    if args.framework_schema:
        ref_cmd_base.extend(["--framework-schema", str(args.framework_schema)])
    ref_cmd_base.append("--output-directory")

    ref_cmds = []

    if not tests:
        tests = _get_all_tests_list(args.profile, op_build_dir, operator)
        logger.info(f"Converting all {args.profile} profile tests")

    # Controls if we copy the tests in their operator sub-directory or not
    output_dir_relative_pos = -1 if trim_op_subdir else -2
    for test in tests:
        logger.info(f"Test chosen: {test}")
        ref_cmd = ref_cmd_base.copy()
        full_output_directory = output_dir / test.relative_to(
            *test.parts[:output_dir_relative_pos]
        )
        ref_cmd.append(str(full_output_directory))
        ref_cmd.append(str(test))
        ref_cmds.append(ref_cmd)

    if len(ref_cmds) == 0:
        logger.warning("No tests found. Nothing to convert")
        # BUG FIX: return the output directory (was an implicit None),
        # since callers pass the return value on to check_op_tests().
        return output_dir

    # Context manager ensures the pool is cleaned up even if map() raises.
    with mp.Pool(args.num_cores) as job_pool:
        pool_results = job_pool.map(c2c_main, ref_cmds)

    failed_counter = 0
    for i, result in enumerate(pool_results):
        if result != 0:
            logger.error(
                f"test {i+1}/{len(ref_cmds)}: {ref_cmds[i][-1]} failed to convert."
            )
            failed_counter += 1
        else:
            logger.info(f"test {i+1}/{len(ref_cmds)}: {ref_cmds[i][-1]} converted")
    logger.info(
        f"{len(ref_cmds)-failed_counter}/{len(ref_cmds)} tests successfully converted"
    )

    if failed_counter > 0:
        logger.error(f"Stopping due to {failed_counter} test conversion errors")
        raise GenConformanceError()

    logger.info("Converted tests to JSON and saved to output directory")

    return output_dir
| 285 | |
| 286 | |
def get_op_tests_selection(args, operator, op_build_dir, test_params, negative=False):
    """Use test picker to get subsection of tests generated."""
    assert operator in test_params
    # Safe: the assert above guarantees the key exists.
    op_params = test_params[operator]
    try:
        selector = Operator.registry[operator](
            op_build_dir,
            op_params,
            negative,
            exclude_types=PROFILE_OPS_INFO[args.profile]["exclude_types"],
        )
    except KeyError:
        logger.error(f"{operator} operator is not supported by test_select")
        raise GenConformanceError()

    return selector.select_tests()
| 303 | |
| 304 | |
def check_op_tests(args, operator, output_dir):
    """Move test folders that contain files larger than 30MB to new directory.

    Oversized tests end up under "<output_dir>_large_files", preserving
    their path relative to output_dir.
    """
    # Explicit Path (the original relied on str.__rtruediv__ via Path).
    destination_dir = Path(str(args.output_dir) + "_large_files")

    tests = _get_all_tests_list(args.profile, output_dir, operator, include_all=True)
    if not tests:
        logger.error(
            f"Couldn't find any tests to size check for {operator} in {output_dir}"
        )
        raise GenConformanceError()

    size_limit_bytes = 30 * 1024**2  # 30MB
    for tdir in tests:
        # Short-circuit on the first oversized file instead of stat-ing all.
        move_dir = any(
            os.stat(file).st_size > size_limit_bytes for file in tdir.glob("*")
        )

        if move_dir:
            move_destination = destination_dir / tdir.relative_to(output_dir)
            logger.warning(
                f"{tdir.relative_to(output_dir)} contains files that are too large (>30MB), test moved to new folder: {destination_dir}"
            )

            if move_destination.is_dir():
                logger.warning(
                    f"{move_destination} directory already exists, deleting existing."
                )
                shutil.rmtree(str(move_destination))
            shutil.move(str(tdir), move_destination)
| 336 | |
| 337 | |
def copy_rename_framework_tests(args, operator, test_picks):
    """Copy framework tests into new folder and rename them if needed.

    The tests are renamed to match the framework operator names if an
    alternate name has been used instead.

    Returns the parent directory holding the copied per-operator folders.
    """
    framework_tests_dir = args.framework_tests_dir
    new_tests_dir = args.build_dir / "frameworks" / operator
    os.makedirs(new_tests_dir, exist_ok=True)

    # Get the framework tests operator name
    if "alternate_names" in test_picks[operator]:
        alternate_names = test_picks[operator]["alternate_names"]
    else:
        alternate_names = [operator]

    # FIX: count across all alternate names (previously only the last
    # alternate name's directories were reported).
    copied_count = 0

    # Get the alternate named test directories for the operator
    for alt_name in alternate_names:
        test_prefix = f"test_{alt_name}"
        test_dirs = list(framework_tests_dir.glob(f"{test_prefix}_*"))
        copied_count += len(test_dirs)

        # Copy tests to new directory and rename to match framework operator names
        # - if there is just 1 alternate name, replace the full test prefix
        #       test_add_... -> add_...
        # - if there are multiple alternate names, just replace the "test"
        #       test_concatv2_... -> concatenation_concatv2_...
        old_prefix = test_prefix if len(alternate_names) == 1 else "test"

        for tdir in test_dirs:
            new_test_name = tdir.name.replace(old_prefix, operator)
            copy_destination = new_tests_dir / new_test_name
            logger.debug(f"copying test folder {tdir} to {copy_destination}")
            # FIX: distutils is deprecated (PEP 632, removed in Python 3.12);
            # shutil.copytree with dirs_exist_ok=True matches copy_tree's
            # merge-into-existing-destination behaviour.
            shutil.copytree(str(tdir), str(copy_destination), dirs_exist_ok=True)

    logger.info(f"Copied and renamed {copied_count} framework test folders")
    return new_tests_dir.parent
| 374 | |
| 375 | |
def get_framework_tests_selection(args, operator, test_picks, op_build_dir):
    """Return the pre-chosen framework tests as paths under op_build_dir."""
    try:
        chosen = test_picks[operator]["tests"]
    except KeyError:
        logger.error(f"Framework test selection not defined for {operator} operator")
        raise GenConformanceError()

    op_dir = op_build_dir / operator
    return [op_dir / name for name in chosen]
| 386 | |
| 387 | |
def parse_args(argv=None):
    """Parse and return the command line arguments."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--operators",
        nargs="*",
        type=str,
        help="The operator(s) to create tests for, if not supplied all tests will be created",
    )
    arg_parser.add_argument(
        "--ref-model-directory",
        dest="ref_model_dir",
        required=True,
        type=Path,
        help="Reference Model directory (must be pre-built)",
    )
    this_script_dir = Path(__file__).parent.absolute()
    arg_parser.add_argument(
        "--test-param-json-directory",
        dest="param_json_dir",
        default=this_script_dir,
        type=Path,
        help="Test parameters (ops info) JSON file directory",
    )
    arg_parser.add_argument(
        "--convert-all-tests",
        action="store_true",
        help="Converts all tests instead of those picked by test_select",
    )
    arg_parser.add_argument(
        "--keep-large-files",
        action="store_true",
        help="Keeps tests that contain files larger than 30MB in output directory",
    )
    arg_parser.add_argument(
        "--capture-output",
        action="store_true",
        help="Prints output of running sh commands",
    )
    arg_parser.add_argument(
        "--build-directory",
        dest="build_dir",
        default=Path.cwd() / "conformance_build",
        type=Path,
        help="Temporary build directory for files created during this process (default is conformance_build)",
    )
    arg_parser.add_argument(
        "--output-directory",
        dest="output_dir",
        default=Path.cwd() / "conformance",
        type=Path,
        help="Output directory (default is conformance)",
    )
    arg_parser.add_argument(
        "-j",
        dest="num_cores",
        default=6,
        type=int,
        help="Number of simultaneous jobs to split the tasks into for multiprocessing",
    )
    arg_parser.add_argument(
        "-v",
        dest="verbosity",
        default=0,
        action="count",
        help="Verbosity (can be used multiple times for more details)",
    )
    arg_parser.add_argument(
        "--unit-tests",
        dest="unit_tests",
        default="operator",
        choices=["operator", "framework", "both"],
        type=str,
        help="Which unit tests are produced: operator, framework, or both",
    )
    arg_parser.add_argument(
        "--test-type",
        dest="test_type",
        default="both",
        choices=["positive", "negative", "both"],
        type=str,
        help="Type of tests produced: positive, negative, or both",
    )
    supported_profiles = list(PROFILE_OPS_INFO.keys())
    arg_parser.add_argument(
        "--profile",
        dest="profile",
        default=supported_profiles[0],
        choices=supported_profiles,
        type=str,
        help="TOSA profile",
    )
    arg_parser.add_argument(
        "--framework-tests-directory",
        dest="framework_tests_dir",
        default=Path.cwd() / "tests",
        type=Path,
        help="The pre-built framework tests directory (default is tests)",
    )
    arg_parser.add_argument(
        "--framework-schema",
        dest="framework_schema",
        type=Path,
        help="Framework flatbuffers schema needed to convert framework models",
    )
    return arg_parser.parse_args(argv)
| 496 | |
| 497 | |
def main() -> int:
    """Entry point: validate setup, then build, run and convert tests.

    Returns:
        0 on success, 1 on a generation/conversion failure,
        2 on an invalid setup (missing directories or schema).
    """
    args = parse_args()

    # Validate the pre-built reference model location before doing any work.
    if not args.ref_model_dir.is_dir():
        logger.error(
            f"Missing or invalid reference model directory: {args.ref_model_dir}"
        )
        return 2
    else:
        ref_model = args.ref_model_dir / LOCATION_REF_MODEL_BINARY
        if not ref_model.is_file():
            logger.error(
                f"{LOCATION_REF_MODEL_BINARY} not found in {args.ref_model_dir}\nHave you built the reference model?"
            )
            return 2
    # Framework tests additionally need the flatbuffers schema and the
    # pre-built framework tests directory.
    if args.unit_tests in ["framework", "both"]:
        if not args.framework_schema:
            logger.error(
                "Need to supply location of Framework flatbuffers schema via --framework-schema"
            )
            return 2
        if not args.framework_tests_dir.is_dir():
            logger.error(
                f"Missing or invalid framework tests directory: {args.framework_tests_dir}"
            )
            return 2

    # Map -v count to a log level, capped at DEBUG.
    loglevels = (logging.WARNING, logging.INFO, logging.DEBUG)
    loglevel = loglevels[min(args.verbosity, len(loglevels) - 1)]
    logger.setLevel(loglevel)
    # Set other loggers the same
    logging.getLogger("test_select").setLevel(loglevel)
    logging.getLogger("convert2conformance").setLevel(loglevel)

    print(f"Creating conformance tests for TOSA {args.profile} profile")
    print(f"Output directory: {args.output_dir}")

    args.build_dir = args.build_dir.resolve()
    logger.debug(f"Creating build directory: {args.build_dir}")
    args.build_dir.mkdir(parents=True, exist_ok=True)

    # Any GenConformanceError raised below aborts the run with exit code 1.
    try:
        # Framework unit tests
        if args.unit_tests in ["framework", "both"]:
            logger.debug("Creating FRAMEWORK unit tests")
            test_picks_file = (
                args.param_json_dir / PROFILE_OPS_INFO[args.profile]["framework_tests"]
            )
            try:
                with open(test_picks_file, "r") as fd:
                    test_picks = json.load(fd)
            except Exception as e:
                logger.error(
                    f"Couldn't load framework tests info - {test_picks_file}: {e}"
                )
                return 1

            operators = args.operators
            if not operators:
                # Create tests for all the operators
                operators = list(test_picks.keys())

            root_output_dir = args.output_dir / "frameworks" / "tflite" / "operators"
            for op in operators:
                if op not in test_picks:
                    logger.warning(
                        f"Framework op {op} not found in {test_picks_file} - skipping"
                    )
                    continue

                # Stage the framework tests into the build dir under their
                # framework operator names, then convert to JSON.
                logger.debug(f"Copying and renaming {op}")
                framework_test_dir = copy_rename_framework_tests(args, op, test_picks)
                if args.convert_all_tests:
                    logger.debug("Running and converting all framework tests")
                    convert_tests(
                        args,
                        op,
                        framework_test_dir,
                        root_output_dir,
                        trim_op_subdir=True,
                    )
                else:
                    framework_tests = get_framework_tests_selection(
                        args, op, test_picks, framework_test_dir
                    )
                    convert_tests(
                        args,
                        op,
                        framework_test_dir,
                        root_output_dir,
                        tests=framework_tests,
                        trim_op_subdir=True,
                    )

        # Operator unit tests
        if args.unit_tests in ["operator", "both"]:
            logger.debug("Creating OPERATOR unit tests")
            test_params_file = (
                args.param_json_dir
                / PROFILE_OPS_INFO[args.profile]["operator_test_params"]
            )
            try:
                with open(test_params_file, "r") as fd:
                    test_params = json.load(fd)
            except Exception as e:
                logger.error(
                    f"Couldn't load operator test params - {test_params_file}: {e}"
                )
                return 1

            operators = args.operators
            if not operators:
                # Create tests for all the operators
                operators = list(test_params.keys())

            for op in operators:
                if op not in test_params:
                    logger.warning(
                        f"{op} operator parameters not found in {test_params_file} - skipping"
                    )
                    continue

                # Skip ops that declare they have no negative tests when
                # only negative tests were requested.
                if (
                    args.test_type == "negative"
                    and "no_negative_tests" in test_params[op]
                    and test_params[op]["no_negative_tests"]
                ):
                    logger.warning(f"No negative tests for {op}")
                    continue

                op_build_dir = build_op_tests(args, op, test_params)

                operator_group = test_params[op]["group"]
                root_output_dir = args.output_dir / "operators"
                if args.convert_all_tests:
                    logger.debug(f"Running and converting all {op} tests")
                    generate_results(args, op, op_build_dir)
                    output_dir = convert_tests(
                        args, op, op_build_dir, root_output_dir, group=operator_group
                    )
                else:
                    if args.test_type in ["positive", "both"]:
                        # tee() so the selection generator can be consumed
                        # by both generate_results and convert_tests.
                        tests_gen1, tests_gen2 = tee(
                            get_op_tests_selection(args, op, op_build_dir, test_params)
                        )
                        generate_results(args, op, op_build_dir, tests_gen1)
                        output_dir = convert_tests(
                            args,
                            op,
                            op_build_dir,
                            root_output_dir,
                            tests=tests_gen2,
                            group=operator_group,
                        )
                    if args.test_type in ["negative", "both"] and (
                        "no_negative_tests" not in test_params[op]
                        or not test_params[op]["no_negative_tests"]
                    ):
                        # Negative (ERRORIF) tests have no result files, so
                        # no generate_results step is needed here.
                        negative_tests = get_op_tests_selection(
                            args, op, op_build_dir, test_params, negative=True
                        )
                        output_dir = convert_tests(
                            args,
                            op,
                            op_build_dir,
                            root_output_dir,
                            tests=negative_tests,
                            group=operator_group,
                        )
                if not args.keep_large_files:
                    check_op_tests(args, op, output_dir)
    except GenConformanceError:
        return 1

    return 0
| 673 | |
| 674 | |
if __name__ == "__main__":
    # FIX: use sys.exit - the builtin exit() is injected by the `site`
    # module and is not guaranteed to exist (e.g. under `python -S`).
    sys.exit(main())