Jeremy Johnson | 0ecfa37 | 2022-06-30 14:27:56 +0100 | [diff] [blame] | 1 | #!/usr/bin/env python3 |
| 2 | # Copyright (c) 2021-2022, ARM Limited. |
| 3 | # SPDX-License-Identifier: Apache-2.0 |
| 4 | """Build conformance tests. |
| 5 | |
| 6 | Steps: |
| 7 | - Specific input shapes (or tests) are specified and produced by using the |
| 8 | settings in the .json files. |
| 9 | - Tests are selected to produce a good coverage. |
| 10 | - Tests are run on the reference model to produce the correct output files. |
| 11 | - Tests are converted into JSON format and saved to desired output directory. |
| 12 | """ |
| 13 | import argparse |
| 14 | import json |
| 15 | import logging |
| 16 | import multiprocessing as mp |
| 17 | import os |
| 18 | import shlex |
| 19 | import shutil |
| 20 | import subprocess |
| 21 | from functools import partial |
| 22 | from itertools import tee |
| 23 | from pathlib import Path |
| 24 | |
| 25 | from conformance.test_select import Operator |
| 26 | from convert2conformance.convert2conformance import main as c2c_main |
| 27 | from distutils.dir_util import copy_tree |
| 28 | |
logging.basicConfig()
logger = logging.getLogger("tosa_verif_conformance_generator")

# Configuration for each TOSA profile
PROFILE_OPS_INFO = {
    "tosa-bi": {
        # JSON files (looked up in --test-param-json-directory) describing
        # the operator tests and framework tests for this profile
        "operator_test_params": "tosa_base_profile_ops_info.json",
        "framework_tests": "tosa_base_profile_framework_ops_info.json",
        # Data-type name fragments to exclude from generated tests
        "exclude_types": [],
    },
    "tosa-mi": {
        # Note: This is just the extra tests not in the base profile!
        "operator_test_params": "tosa_main_profile_ops_info.json",
        "framework_tests": "tosa_main_profile_framework_ops_info.json",
        "exclude_types": [],
    },
}
# Pseudo-profile name meaning "generate tests for every profile above"
PROFILES_ALL = "all"

# Reference model binary location, relative to --ref-model-directory
LOCATION_REF_MODEL_BINARY = Path("build/reference_model/tosa_reference_model")

# Seed used for official conformance tests; a different --seed produces
# tests that will not match official conformance (warned about in main)
DEFAULT_SEED = 42
| 51 | |
Jeremy Johnson | 0ecfa37 | 2022-06-30 14:27:56 +0100 | [diff] [blame] | 52 | |
class GenConformanceError(Exception):
    """Generation error reporting exception.

    Raised when building, selecting or converting conformance tests fails.
    """

    pass
| 57 | |
| 58 | |
| 59 | def _run_sh_command(args, cwd, full_cmd): |
| 60 | """Run an external command and capture stdout/stderr.""" |
| 61 | # Quote the command line for printing |
| 62 | full_cmd_esc = [shlex.quote(x) for x in full_cmd] |
| 63 | if args.capture_output: |
| 64 | logger.debug(f"Command: {full_cmd_esc}") |
| 65 | |
| 66 | rc = subprocess.run( |
| 67 | full_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd |
| 68 | ) |
| 69 | |
| 70 | if args.capture_output: |
| 71 | stdout = rc.stdout.decode("utf-8") |
| 72 | logger.debug(f"stdout: \n{stdout}") |
| 73 | if rc.returncode != 0: |
| 74 | |
| 75 | raise Exception( |
| 76 | "Error running command: {}.\n{}".format( |
| 77 | " ".join(full_cmd_esc), rc.stderr.decode("utf-8") |
| 78 | ) |
| 79 | ) |
| 80 | return (rc.stdout, rc.stderr) |
| 81 | |
| 82 | |
def build_op_tests(args, profile, operator, test_params):
    """Build tests for a given operator.

    Builds a set of tests based on the parameters defined in test_params

    Returns operator output directory
    """
    assert operator in test_params

    op_build_dir = args.build_dir / profile

    # Generator options shared by every invocation below
    base_cmd = [
        "tosa_verif_build_tests",
        "--filter",
        operator,
        "-o",
        str(op_build_dir),
        "--seed",
        str(args.random_seed),
    ]

    cmd_batches = []

    if args.test_type in ["positive", "both"]:
        # One generator invocation per set of extra parameters
        for extra_args in test_params[operator]["generator_args"]:
            cmd_batches.append(
                base_cmd + ["--test-type", "positive"] + list(extra_args)
            )

    if args.test_type in ["negative", "both"]:
        # Collect the --target-dtype options used by the positive tests so
        # that negative tests are limited to the data types actually needed
        dtype_args = []
        for extra_args in test_params[operator]["generator_args"]:
            pos = 0
            while pos < len(extra_args):
                if extra_args[pos] == "--target-dtype":
                    if extra_args[pos + 1] not in dtype_args:
                        dtype_args.extend(extra_args[pos : pos + 2])
                    pos += 2  # skip over option and its argument
                else:
                    pos += 1
        # Limit sizes of negative tests
        dim_range = test_params[operator].get("generator_negative_dim_range", "1,16")
        cmd_batches.append(
            base_cmd
            + ["--test-type", "negative", "--tensor-dim-range", dim_range]
            + dtype_args
        )

    logger.debug(f"Creating {operator} tests with {len(cmd_batches)} parameter(s)")
    error = False
    for batch_no, cmd in enumerate(cmd_batches, start=1):
        try:
            _run_sh_command(args, args.ref_model_dir.absolute(), cmd)
            logger.info(
                f"{operator} test batch {batch_no}/{len(cmd_batches)} created successfully"
            )
        except Exception as e:
            logger.error(
                f"{operator} test batch {batch_no}/{len(cmd_batches)} unsuccessful, skipping"
            )
            logger.error(f" build_op_tests error: {e} ")
            error = True
    if error:
        raise GenConformanceError()

    return op_build_dir
| 156 | |
| 157 | |
def _check_to_include_test(profile, test_name, exclude_negative_tests=False):
    """Check test name for exclusions, return False to indicate excluded."""
    # Profile-specific exclusions, plus ERRORIF (negative) tests on request
    exclusions = list(PROFILE_OPS_INFO[profile]["exclude_types"])
    if exclude_negative_tests:
        exclusions.append("ERRORIF")

    # A test is excluded when any exclusion appears as a "_NAME_" fragment
    return not any(f"_{excl}_" in test_name for excl in exclusions)
| 167 | |
| 168 | |
def _get_all_tests_list(
    profile, test_root_dir, operator, exclude_negative_tests=False, include_all=False
):
    """Create test list based on tests in the test_dir."""
    test_dir = test_root_dir / operator
    if test_dir.is_dir():
        directories = [test_dir]
    else:
        # Tests are split into multiple dirs, for example: conv2d_1x1, conv2d_3x3
        directories = [
            d for d in test_root_dir.glob("*") if d.name.startswith(operator)
        ]

    tests = []
    for directory in directories:
        for test in directory.glob("*"):
            # include_all bypasses the exclusion filtering entirely
            if include_all or _check_to_include_test(
                profile, test.name, exclude_negative_tests
            ):
                tests.append(test)
    return tests
| 194 | |
| 195 | |
def generate_results(args, profile, operator, op_build_dir, tests=None):
    """Run tests on reference model and save result to the test directory.

    Args:
        args: parsed command line arguments
        profile: TOSA profile the tests belong to
        operator: name of the operator under test
        op_build_dir: directory containing the generated tests
        tests: optional explicit list of test directories; when None, all
            non-negative (non-ERRORIF) tests found for the operator are run
    """
    num_cores = args.num_cores
    run_tests_cmd = "tosa_verif_run_tests"

    ref_model_path = args.ref_model_dir / LOCATION_REF_MODEL_BINARY
    # Fixed: removed the redundant `ref_cmd` alias from this assignment
    ref_cmd_base = [
        run_tests_cmd,
        "--ref-model-path",
        str(ref_model_path.absolute()),
        "-j",
        str(num_cores),
        "-v",
        "-t",
    ]

    if not tests:
        # Do not need to run ERRORIF tests as they don't have result files
        tests = _get_all_tests_list(
            profile, op_build_dir, operator, exclude_negative_tests=True
        )

    # One runner command per test, ending with the test path
    ref_cmds = [ref_cmd_base + [str(test)] for test in tests]

    fail_string = "UNEXPECTED_FAILURE"
    failed_counter = 0

    # Fixed: use num_cores consistently (was args.num_cores here only)
    job_pool = mp.Pool(num_cores)
    sh_partial = partial(_run_sh_command, args, args.ref_model_dir.absolute())
    pool_results = job_pool.map(sh_partial, ref_cmds)
    job_pool.close()
    job_pool.join()

    # Use captured output for run_sh_command to work out if test passed.
    for i, rc in enumerate(pool_results):
        if fail_string in str(rc[0]):
            logger.error(f"Test {i+1}/{len(ref_cmds)}: {ref_cmds[i][-1]} failed.")
            failed_counter += 1
        else:
            logger.info(f"Test {i+1}/{len(ref_cmds)}: {ref_cmds[i][-1]} passed.")

    logger.info(f"{len(ref_cmds)-failed_counter}/{len(ref_cmds)} tests passed")
    logger.info("Ran tests on model and saved results of passing tests")
| 243 | |
| 244 | |
def convert_tests(
    args,
    profile,
    operator,
    op_build_dir,
    output_dir,
    op_profiles_list,
    tests=None,
    group=None,
    trim_op_subdir=False,
):
    """Convert tests to JSON and save to output directory.

    Args:
        args: parsed command line arguments
        profile: TOSA profile the tests are being produced for
        operator: name of the operator being converted
        op_build_dir: directory containing the generated tests
        output_dir: root directory for the converted tests
        op_profiles_list: every profile this operator belongs to; each is
            passed to convert2conformance via a --profile option
        tests: optional explicit list of tests; when None, all tests found
            for the operator in op_build_dir are converted
        group: optional sub-directory grouping appended to output_dir
        trim_op_subdir: when True, drop the operator sub-directory level
            from the per-test output path

    Returns:
        the output directory used, or None if there was nothing to convert

    Raises:
        GenConformanceError: if any test fails to convert.
    """
    ref_model_dir = args.ref_model_dir

    if group:
        output_dir = output_dir / group

    # Arguments common to every convert2conformance invocation
    c2c_args_base = ["--strict", "--ref-model-directory", str(ref_model_dir)]
    # This op maybe in more than one profile - e.g. tosa_bi and tosa_mi
    # even if we are only producing tests for tosa_mi
    for op_profile in op_profiles_list:
        c2c_args_base.extend(["--profile", op_profile])
    if args.framework_schema:
        c2c_args_base.extend(["--framework-schema", str(args.framework_schema)])
    # The per-test output directory value is appended in the loop below
    c2c_args_base.append("--output-directory")

    c2c_args_list = []

    if not tests:
        tests = _get_all_tests_list(profile, op_build_dir, operator)
        logger.info(f"Converting all {profile} profile tests")

    # Controls if we copy the tests in their operator sub-directory or not
    output_dir_relative_pos = -1 if trim_op_subdir else -2
    for test in tests:
        logger.info(f"Test chosen: {test}")
        c2c_args = c2c_args_base.copy()
        # Keep only the trailing path components of the test under output_dir
        full_output_directory = output_dir / test.relative_to(
            *test.parts[:output_dir_relative_pos]
        )
        c2c_args.append(str(full_output_directory))
        c2c_args.append(str(test))
        c2c_args_list.append(c2c_args)

    if len(c2c_args_list) == 0:
        logger.warning("No tests found. Nothing to convert")
        return

    # Convert in parallel by calling the convert2conformance main directly
    job_pool = mp.Pool(args.num_cores)

    pool_results = job_pool.map(c2c_main, c2c_args_list)
    job_pool.close()
    job_pool.join()

    # c2c_main returns non-zero on failure for that test
    failed_counter = 0
    for i, result in enumerate(pool_results):
        if result != 0:
            logger.error(
                f"test {i+1}/{len(c2c_args_list)}: {c2c_args_list[i][-1]} failed to convert."
            )
            failed_counter += 1
        else:
            logger.info(
                f"test {i+1}/{len(c2c_args_list)}: {c2c_args_list[i][-1]} converted"
            )
    logger.info(
        f"{len(c2c_args_list)-failed_counter}/{len(c2c_args_list)} tests successfully converted"
    )

    if failed_counter > 0:
        logger.error(f"Stopping due to {failed_counter} test conversion errors")
        raise (GenConformanceError())

    logger.info("Converted tests to JSON and saved to output directory")

    return output_dir
| 321 | |
| 322 | |
def get_op_tests_selection(
    args, profile, operator, op_build_dir, test_params, negative=False
):
    """Use test picker to get subsection of tests generated."""
    assert operator in test_params
    selection = "negative" if negative else "positive"
    logger.info("Choosing {} tests".format(selection))
    try:
        params = test_params[operator]
        # Look up the test_select Operator class registered for this op and
        # instantiate it over the generated tests
        op = Operator.registry[operator](
            op_build_dir,
            params,
            negative,
            exclude_types=PROFILE_OPS_INFO[profile]["exclude_types"],
        )
    except KeyError:
        logger.error(f"{operator} operator is not supported by test_select")
        raise GenConformanceError()

    return op.select_tests()
| 342 | |
| 343 | |
def check_op_tests(args, profile, operator, output_dir):
    """Move test folders that contain files larger than 30MB to new directory.

    Args:
        args: parsed command line arguments (reads args.output_dir)
        profile: TOSA profile the tests belong to
        operator: name of the operator being checked
        output_dir: directory containing the converted tests

    Raises:
        GenConformanceError: if no tests are found to size check.
    """
    # Fixed: use a real Path (the original kept a str and relied on
    # Path.__rtruediv__ accepting it on the left of the "/" operator)
    destination_dir = Path(str(args.output_dir) + "_large_files")

    tests = _get_all_tests_list(profile, output_dir, operator, include_all=True)
    if not tests:
        logger.error(
            f"Couldn't find any tests to size check for {operator} in {output_dir}"
        )
        raise GenConformanceError()

    for tdir in tests:
        # Move the test if any of its files exceeds 30MB; any() stops at the
        # first oversized file instead of stat-ing the rest
        move_dir = any(
            os.stat(file).st_size / 1024**2 > 30 for file in tdir.glob("*")
        )

        if move_dir:
            move_destination = destination_dir / tdir.relative_to(output_dir)
            logger.warning(
                f"{tdir.relative_to(output_dir)} contains files that are too large (>30MB), test moved to new folder: {destination_dir}"
            )

            if move_destination.is_dir():
                logger.warning(
                    f"{move_destination} directory already exists, deleting existing."
                )
                shutil.rmtree(str(move_destination))
            shutil.move(str(tdir), move_destination)
| 375 | |
| 376 | |
def copy_rename_framework_tests(args, operator, test_picks):
    """Copy framework tests into new folder and rename them if needed.

    The tests are renamed to match the framework operator names if an
    alternate name has been used instead.

    Returns the parent directory holding all copied framework tests.
    """
    framework_tests_dir = args.framework_tests_dir
    new_tests_dir = args.build_dir / "frameworks" / operator
    os.makedirs(new_tests_dir, exist_ok=True)

    # Get the framework tests operator name
    if "alternate_names" in test_picks[operator]:
        alternate_names = test_picks[operator]["alternate_names"]
    else:
        alternate_names = [operator]

    # Get the alternate named test directories for the operator
    for alt_name in alternate_names:
        test_prefix = f"test_{alt_name}"
        test_dirs = list(framework_tests_dir.glob(f"{test_prefix}_*"))

        # Copy tests to new directory and rename to match framework operator names
        # - if there is just 1 alternate name, replace the full test prefix
        #   test_add_... -> add_...
        # - if there are multiple alternate names, just replace the "test"
        #   test_concatv2_... -> concatenation_concatv2_...
        old_prefix = test_prefix if len(alternate_names) == 1 else "test"

        for tdir in test_dirs:
            new_test_name = tdir.name.replace(old_prefix, operator)
            copy_destination = new_tests_dir / new_test_name
            logger.debug(f"copying test folder {tdir} to {copy_destination}")
            # NOTE(review): distutils is deprecated (removed in Python 3.12);
            # consider shutil.copytree(..., dirs_exist_ok=True) instead
            copy_tree(str(tdir), str(copy_destination))

    # NOTE(review): len(test_dirs) here only counts the last alternate
    # name's directories, not the total copied - confirm if intended
    logger.info(f"Copied and renamed {len(test_dirs)} framework test folders")
    return new_tests_dir.parent
| 413 | |
| 414 | |
def get_framework_tests_selection(args, operator, test_picks, op_build_dir):
    """Get the list of pre-chosen tests with relative paths."""
    try:
        chosen = test_picks[operator]["tests"]
    except KeyError:
        logger.error(f"Framework test selection not defined for {operator} operator")
        raise (GenConformanceError())

    # Each entry becomes op_build_dir/<operator>/<test name>
    return [op_build_dir / operator / name for name in chosen]
| 425 | |
| 426 | |
def parse_args(argv=None):
    """Parse the arguments.

    Args:
        argv: optional list of argument strings (defaults to sys.argv)

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    # Profile choices are the configured profiles plus the "all" pseudo-profile
    profiles = list(PROFILE_OPS_INFO.keys())
    profiles.append(PROFILES_ALL)
    parser.add_argument(
        "--profile",
        dest="profile",
        choices=profiles,
        default=profiles[0],
        type=str,
        help=f"TOSA profile (default is {profiles[0]})",
    )
    parser.add_argument(
        "--operators",
        type=str,
        nargs="*",
        help="The operator(s) to create tests for, if not supplied all tests will be created",
    )
    parser.add_argument(
        "--unit-tests",
        dest="unit_tests",
        choices=["operator", "framework", "both"],
        default="operator",
        type=str,
        help="Which unit tests are produced (default is operator)",
    )
    parser.add_argument(
        "--test-type",
        dest="test_type",
        choices=["positive", "negative", "both"],
        default="both",
        type=str,
        help="Type of tests produced (default is both)",
    )
    parser.add_argument(
        "--ref-model-directory",
        dest="ref_model_dir",
        type=Path,
        required=True,
        help="Reference Model directory (must be pre-built)",
    )
    parser.add_argument(
        "--seed",
        dest="random_seed",
        default=DEFAULT_SEED,
        type=int,
        help="Random test seed",
    )
    parser.add_argument(
        "--framework-tests-directory",
        dest="framework_tests_dir",
        type=Path,
        default=Path.cwd() / "tests",
        help="The pre-built framework tests directory (default is tests)",
    )
    parser.add_argument(
        "--framework-schema",
        dest="framework_schema",
        type=Path,
        help="Framework flatbuffers schema needed to convert framework models",
    )
    parser.add_argument(
        "--build-directory",
        dest="build_dir",
        type=Path,
        default=Path.cwd() / "conformance_build",
        help="Temporary build directory for files created during this process (default is conformance_build)",
    )
    parser.add_argument(
        "--output-directory",
        dest="output_dir",
        type=Path,
        default=Path.cwd() / "conformance",
        help="Output directory (default is conformance)",
    )
    # The ops-info JSON files default to the directory containing this script
    script_dir = Path(__file__).parent.absolute()
    parser.add_argument(
        "--test-param-json-directory",
        dest="param_json_dir",
        type=Path,
        default=script_dir,
        help=f"Test parameters (ops info) JSON file directory (default is {script_dir})",
    )
    parser.add_argument(
        "--convert-all-tests",
        action="store_true",
        help="Converts all tests instead of those picked by test_select",
    )
    parser.add_argument(
        "--keep-large-files",
        action="store_true",
        help="Keeps tests that contain files larger than 30MB in output directory",
    )
    parser.add_argument(
        "--capture-output",
        action="store_true",
        help="Prints output of running sh commands",
    )
    parser.add_argument(
        "-j",
        dest="num_cores",
        type=int,
        default=6,
        help="Number of simultaneous jobs to split the tasks into for multiprocessing",
    )
    parser.add_argument(
        "-v",
        dest="verbosity",
        action="count",
        default=0,
        help="Verbosity (can be used multiple times for more details)",
    )
    args = parser.parse_args(argv)

    return args
| 543 | |
| 544 | |
| 545 | def main(): |
| 546 | args = parse_args() |
| 547 | |
| 548 | if not args.ref_model_dir.is_dir(): |
| 549 | logger.error( |
| 550 | f"Missing or invalid reference model directory: {args.ref_model_dir}" |
| 551 | ) |
| 552 | return 2 |
| 553 | else: |
| 554 | ref_model = args.ref_model_dir / LOCATION_REF_MODEL_BINARY |
| 555 | if not ref_model.is_file(): |
| 556 | logger.error( |
| 557 | f"{LOCATION_REF_MODEL_BINARY} not found in {args.ref_model_dir}\nHave you built the reference model?" |
| 558 | ) |
| 559 | return 2 |
| 560 | if args.unit_tests in ["framework", "both"]: |
| 561 | if not args.framework_schema: |
| 562 | logger.error( |
| 563 | "Need to supply location of Framework flatbuffers schema via --framework-schema" |
| 564 | ) |
| 565 | return 2 |
| 566 | if not args.framework_tests_dir.is_dir(): |
| 567 | logger.error( |
| 568 | f"Missing or invalid framework tests directory: {args.framework_tests_dir}" |
| 569 | ) |
| 570 | return 2 |
| 571 | |
| 572 | loglevels = (logging.WARNING, logging.INFO, logging.DEBUG) |
| 573 | loglevel = loglevels[min(args.verbosity, len(loglevels) - 1)] |
| 574 | logger.setLevel(loglevel) |
| 575 | # Set other loggers the same |
| 576 | logging.getLogger("test_select").setLevel(loglevel) |
| 577 | logging.getLogger("convert2conformance").setLevel(loglevel) |
| 578 | |
Jeremy Johnson | 0ecfa37 | 2022-06-30 14:27:56 +0100 | [diff] [blame] | 579 | print(f"Output directory: {args.output_dir}") |
| 580 | |
Jeremy Johnson | 93d4390 | 2022-09-27 12:26:14 +0100 | [diff] [blame] | 581 | if args.random_seed != DEFAULT_SEED: |
| 582 | logger.warning( |
| 583 | "Random test seed changed from default, tests will not match official conformance" |
| 584 | ) |
| 585 | |
Jeremy Johnson | 0ecfa37 | 2022-06-30 14:27:56 +0100 | [diff] [blame] | 586 | args.build_dir = args.build_dir.resolve() |
| 587 | logger.debug(f"Creating build directory: {args.build_dir}") |
| 588 | args.build_dir.mkdir(parents=True, exist_ok=True) |
| 589 | |
Jeremy Johnson | e4b08ff | 2022-09-15 10:38:17 +0100 | [diff] [blame] | 590 | # TODO: For tosa-mi should really generate tosa-bi profile as well |
| 591 | # - for now leave it as subset instead of as superset (for testing) |
| 592 | if args.profile == PROFILES_ALL: |
| 593 | profiles = list(PROFILE_OPS_INFO.keys()) |
| 594 | else: |
| 595 | profiles = [args.profile] |
| 596 | |
Jeremy Johnson | 0ecfa37 | 2022-06-30 14:27:56 +0100 | [diff] [blame] | 597 | try: |
Jeremy Johnson | e4b08ff | 2022-09-15 10:38:17 +0100 | [diff] [blame] | 598 | for profile in profiles: |
| 599 | print(f"Creating conformance tests for TOSA {profile} profile") |
| 600 | # Framework unit tests |
| 601 | if args.unit_tests in ["framework", "both"]: |
| 602 | logger.debug("Creating FRAMEWORK unit tests") |
| 603 | test_picks_file = ( |
| 604 | args.param_json_dir / PROFILE_OPS_INFO[profile]["framework_tests"] |
Jeremy Johnson | 0ecfa37 | 2022-06-30 14:27:56 +0100 | [diff] [blame] | 605 | ) |
Jeremy Johnson | e4b08ff | 2022-09-15 10:38:17 +0100 | [diff] [blame] | 606 | try: |
| 607 | with open(test_picks_file, "r") as fd: |
| 608 | test_picks = json.load(fd) |
| 609 | except Exception as e: |
| 610 | logger.error( |
| 611 | f"Couldn't load framework tests info - {test_picks_file}: {e}" |
Jeremy Johnson | 0ecfa37 | 2022-06-30 14:27:56 +0100 | [diff] [blame] | 612 | ) |
Jeremy Johnson | e4b08ff | 2022-09-15 10:38:17 +0100 | [diff] [blame] | 613 | return 1 |
Jeremy Johnson | 0ecfa37 | 2022-06-30 14:27:56 +0100 | [diff] [blame] | 614 | |
Jeremy Johnson | e4b08ff | 2022-09-15 10:38:17 +0100 | [diff] [blame] | 615 | operators = args.operators |
| 616 | if not operators: |
| 617 | # Create tests for all the operators |
| 618 | operators = list(test_picks.keys()) |
| 619 | |
| 620 | root_output_dir = ( |
| 621 | args.output_dir / "frameworks" / "tflite" / "operators" |
| 622 | ) |
| 623 | for op in operators: |
| 624 | logger.info(f"FRAMEWORK OP: {op}") |
| 625 | if op not in test_picks: |
| 626 | logger.warning( |
| 627 | f"Framework op {op} not found in {test_picks_file} - skipping" |
| 628 | ) |
| 629 | continue |
| 630 | |
| 631 | op_profiles_list = test_picks[op]["profile"] |
| 632 | if ( |
| 633 | args.profile != PROFILES_ALL |
| 634 | and args.profile not in op_profiles_list |
| 635 | ): |
| 636 | # Skip this operator as not part of the profile chosen |
| 637 | logger.debug(f"Skipping {op} as not part of {args.profile}") |
| 638 | continue |
| 639 | |
| 640 | logger.debug(f"Copying and renaming {op}") |
| 641 | framework_test_dir = copy_rename_framework_tests( |
| 642 | args, op, test_picks |
| 643 | ) |
| 644 | |
| 645 | if args.convert_all_tests: |
| 646 | logger.debug("Running and converting all framework tests") |
| 647 | framework_tests = None # Don't select any |
| 648 | else: |
| 649 | logger.debug("Running and converting selected framework tests") |
| 650 | framework_tests = get_framework_tests_selection( |
| 651 | args, op, test_picks, framework_test_dir |
| 652 | ) |
Jeremy Johnson | 0ecfa37 | 2022-06-30 14:27:56 +0100 | [diff] [blame] | 653 | convert_tests( |
| 654 | args, |
Jeremy Johnson | e4b08ff | 2022-09-15 10:38:17 +0100 | [diff] [blame] | 655 | profile, |
Jeremy Johnson | 0ecfa37 | 2022-06-30 14:27:56 +0100 | [diff] [blame] | 656 | op, |
| 657 | framework_test_dir, |
| 658 | root_output_dir, |
Jeremy Johnson | e4b08ff | 2022-09-15 10:38:17 +0100 | [diff] [blame] | 659 | op_profiles_list, |
Jeremy Johnson | 0ecfa37 | 2022-06-30 14:27:56 +0100 | [diff] [blame] | 660 | tests=framework_tests, |
| 661 | trim_op_subdir=True, |
| 662 | ) |
| 663 | |
Jeremy Johnson | e4b08ff | 2022-09-15 10:38:17 +0100 | [diff] [blame] | 664 | # Operator unit tests |
| 665 | if args.unit_tests in ["operator", "both"]: |
| 666 | logger.debug("Creating OPERATOR unit tests") |
| 667 | test_params_file = ( |
| 668 | args.param_json_dir |
| 669 | / PROFILE_OPS_INFO[profile]["operator_test_params"] |
Jeremy Johnson | 0ecfa37 | 2022-06-30 14:27:56 +0100 | [diff] [blame] | 670 | ) |
Jeremy Johnson | e4b08ff | 2022-09-15 10:38:17 +0100 | [diff] [blame] | 671 | try: |
| 672 | with open(test_params_file, "r") as fd: |
| 673 | test_params = json.load(fd) |
| 674 | except Exception as e: |
| 675 | logger.error( |
| 676 | f"Couldn't load operator test params - {test_params_file}: {e}" |
Jeremy Johnson | 0ecfa37 | 2022-06-30 14:27:56 +0100 | [diff] [blame] | 677 | ) |
Jeremy Johnson | e4b08ff | 2022-09-15 10:38:17 +0100 | [diff] [blame] | 678 | return 1 |
Jeremy Johnson | 0ecfa37 | 2022-06-30 14:27:56 +0100 | [diff] [blame] | 679 | |
Jeremy Johnson | e4b08ff | 2022-09-15 10:38:17 +0100 | [diff] [blame] | 680 | operators = args.operators |
| 681 | if not operators: |
| 682 | # Create tests for all the operators |
| 683 | operators = list(test_params.keys()) |
Jeremy Johnson | 0ecfa37 | 2022-06-30 14:27:56 +0100 | [diff] [blame] | 684 | |
Jeremy Johnson | e4b08ff | 2022-09-15 10:38:17 +0100 | [diff] [blame] | 685 | for op in operators: |
| 686 | logger.info(f"OPERATOR: {op}") |
| 687 | if op not in test_params: |
| 688 | logger.warning( |
| 689 | f"{op} operator parameters not found in {test_params_file} - skipping" |
| 690 | ) |
| 691 | continue |
Jeremy Johnson | 0ecfa37 | 2022-06-30 14:27:56 +0100 | [diff] [blame] | 692 | |
Jeremy Johnson | e4b08ff | 2022-09-15 10:38:17 +0100 | [diff] [blame] | 693 | if ( |
| 694 | args.test_type == "negative" |
| 695 | and "no_negative_tests" in test_params[op] |
| 696 | and test_params[op]["no_negative_tests"] |
| 697 | ): |
| 698 | logger.warning(f"No negative tests for {op}") |
| 699 | continue |
| 700 | |
| 701 | op_profiles_list = test_params[op]["profile"] |
| 702 | if ( |
| 703 | args.profile != PROFILES_ALL |
| 704 | and args.profile not in op_profiles_list |
| 705 | ): |
| 706 | # Skip this operator as not part of the profile chosen |
| 707 | logger.debug(f"Skipping {op} as not part of {args.profile}") |
| 708 | continue |
| 709 | |
Jeremy Johnson | d88c3b3 | 2022-12-01 14:46:14 +0000 | [diff] [blame] | 710 | op_build_dir = build_op_tests(args, profile, op, test_params) |
Jeremy Johnson | e4b08ff | 2022-09-15 10:38:17 +0100 | [diff] [blame] | 711 | |
| 712 | operator_group = test_params[op]["group"] |
| 713 | root_output_dir = args.output_dir / "operators" |
| 714 | if args.convert_all_tests: |
| 715 | logger.debug(f"Running and converting all {op} tests") |
| 716 | generate_results(args, profile, op, op_build_dir) |
| 717 | operator_test_list = None |
| 718 | else: |
| 719 | logger.debug(f"Running and converting selection of {op} tests") |
| 720 | if args.test_type in ["positive", "both"]: |
| 721 | tests_gen, tests_gen2 = tee( |
| 722 | get_op_tests_selection( |
| 723 | args, profile, op, op_build_dir, test_params |
| 724 | ) |
| 725 | ) |
| 726 | generate_results(args, profile, op, op_build_dir, tests_gen) |
| 727 | operator_test_list = list(tests_gen2) |
| 728 | else: |
| 729 | operator_test_list = [] |
| 730 | if args.test_type in ["negative", "both"] and ( |
| 731 | "no_negative_tests" not in test_params[op] |
| 732 | or not test_params[op]["no_negative_tests"] |
| 733 | ): |
| 734 | operator_test_list.extend( |
| 735 | get_op_tests_selection( |
| 736 | args, |
| 737 | profile, |
| 738 | op, |
| 739 | op_build_dir, |
| 740 | test_params, |
| 741 | negative=True, |
| 742 | ) |
| 743 | ) |
Jeremy Johnson | 0ecfa37 | 2022-06-30 14:27:56 +0100 | [diff] [blame] | 744 | output_dir = convert_tests( |
Jeremy Johnson | 8858862 | 2022-07-12 16:42:29 +0100 | [diff] [blame] | 745 | args, |
Jeremy Johnson | e4b08ff | 2022-09-15 10:38:17 +0100 | [diff] [blame] | 746 | profile, |
Jeremy Johnson | 8858862 | 2022-07-12 16:42:29 +0100 | [diff] [blame] | 747 | op, |
| 748 | op_build_dir, |
| 749 | root_output_dir, |
Jeremy Johnson | e4b08ff | 2022-09-15 10:38:17 +0100 | [diff] [blame] | 750 | op_profiles_list, |
| 751 | tests=operator_test_list, |
Jeremy Johnson | 8858862 | 2022-07-12 16:42:29 +0100 | [diff] [blame] | 752 | group=operator_group, |
Jeremy Johnson | 0ecfa37 | 2022-06-30 14:27:56 +0100 | [diff] [blame] | 753 | ) |
Jeremy Johnson | e4b08ff | 2022-09-15 10:38:17 +0100 | [diff] [blame] | 754 | if not args.keep_large_files: |
| 755 | check_op_tests(args, profile, op, output_dir) |
Jeremy Johnson | 0ecfa37 | 2022-06-30 14:27:56 +0100 | [diff] [blame] | 756 | except GenConformanceError: |
| 757 | return 1 |
| 758 | |
| 759 | return 0 |
| 760 | |
| 761 | |
if __name__ == "__main__":
    import sys

    # Use sys.exit() rather than the builtin exit(): the latter is a
    # site-module convenience intended for interactive sessions and is not
    # guaranteed to exist (e.g. when run with `python -S`).
    sys.exit(main())