#!/usr/bin/env python3
# Copyright (c) 2021-2022, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
"""Build conformance tests.

Steps:
- Specific input shapes (or tests) are specified and produced using the
  settings in the .json files.
- Tests are selected to produce good coverage.
- Tests are run on the reference model to produce the correct output files.
- Tests are converted into JSON format and saved to the desired output
  directory.
"""
import argparse
import json
import logging
import multiprocessing as mp
import os
import shlex
import shutil
import subprocess
from functools import partial
from itertools import tee
from pathlib import Path

from conformance.test_select import Operator
from convert2conformance.convert2conformance import main as c2c_main
from distutils.dir_util import copy_tree

logging.basicConfig()
logger = logging.getLogger("tosa_verif_conformance_generator")

# Configuration for each TOSA profile
PROFILE_OPS_INFO = {
    "tosa-bi": {
        "operator_test_params": "tosa_base_profile_ops_info.json",
        "framework_tests": "tosa_base_profile_framework_ops_info.json",
        "exclude_types": [],
    }
}

LOCATION_REF_MODEL_BINARY = Path("build/reference_model/tosa_reference_model")

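# The fixed default seed keeps the generated tests reproducible; main() warns
# when it is overridden, as the tests will then not match official conformance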
DEFAULT_SEED = 42


class GenConformanceError(Exception):
    """Generation error reporting exception."""

    pass


def _run_sh_command(args, cwd, full_cmd):
    """Run an external command and capture stdout/stderr."""
    # Quote the command line for printing
    full_cmd_esc = [shlex.quote(x) for x in full_cmd]
    if args.capture_output:
        logger.debug(f"Command: {full_cmd_esc}")

    rc = subprocess.run(
        full_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd
    )

    if args.capture_output:
        stdout = rc.stdout.decode("utf-8")
        logger.debug(f"stdout: \n{stdout}")
    if rc.returncode != 0:
        raise Exception(
            "Error running command: {}.\n{}".format(
                " ".join(full_cmd_esc), rc.stderr.decode("utf-8")
            )
        )
    return (rc.stdout, rc.stderr)


def build_op_tests(args, operator, test_params):
    """Build tests for a given operator.

    Builds a set of tests based on the parameters defined in test_params.

    Returns the operator output directory.
    """
    assert operator in test_params

    build_tests_cmd = "tosa_verif_build_tests"
    op_build_dir = args.build_dir

    ref_cmd_base = [
        build_tests_cmd,
        "--filter",
        operator,
        "-o",
        str(op_build_dir),
        "--seed",
        str(args.random_seed),
    ]

    ref_cmds = []

    if args.test_type in ["positive", "both"]:
        # Append extra parameters and run test generator for each set of parameters.
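        # generator_args comes from the ops-info JSON: a list of argument
        # lists, e.g. (illustrative):
        #   [["--target-dtype", "int8", "--target-shape", "1,32,32,8"], ...]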
        for arglist in test_params[operator]["generator_args"]:
            ref_cmd_pos_test = ref_cmd_base.copy()
            ref_cmd_pos_test.extend(["--test-type", "positive"])
            ref_cmd_pos_test.extend(arglist)
            ref_cmds.append(ref_cmd_pos_test)

    if args.test_type in ["negative", "both"]:
        # Gather only the --target-dtype options, to limit tests to those needed
        target_dtypes_args = []
        for arglist in test_params[operator]["generator_args"]:
            idx = 0
            while idx < len(arglist):
                if arglist[idx] == "--target-dtype":
                    if arglist[idx + 1] not in target_dtypes_args:
                        target_dtypes_args.extend(arglist[idx : idx + 2])
                    idx += 1  # skip over option (and then argument below)
                idx += 1
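        # target_dtypes_args now holds the de-duplicated dtype options, e.g.
        # (illustrative): ["--target-dtype", "int8", "--target-dtype", "int32"]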
        ref_cmd_neg_test = ref_cmd_base.copy()
        ref_cmd_neg_test.extend(["--test-type", "negative"])
        # Limit sizes of negative tests
        ref_cmd_neg_test.extend(["--tensor-dim-range", "1,16"])
        ref_cmd_neg_test.extend(target_dtypes_args)
        ref_cmds.append(ref_cmd_neg_test)

    logger.debug(f"Creating {operator} tests with {len(ref_cmds)} parameter(s)")
    error = False
    for i, cmd in enumerate(ref_cmds):
        try:
            _run_sh_command(args, args.ref_model_dir.absolute(), cmd)
            logger.info(
                f"{operator} test batch {(i+1)}/{len(ref_cmds)} created successfully"
            )
        except Exception as e:
            logger.error(
                f"{operator} test batch {(i+1)}/{len(ref_cmds)} unsuccessful, skipping"
            )
            logger.error(f" build_op_tests error: {e} ")
            error = True
    if error:
        raise GenConformanceError()

    return op_build_dir


def _check_to_include_test(profile, test_name, exclude_negative_tests=False):
    """Check test name for exclusions, return False to indicate excluded."""
    excludes = ["ERRORIF"] if exclude_negative_tests else []
    excludes.extend(PROFILE_OPS_INFO[profile]["exclude_types"])

    for exclusion in excludes:
        if f"_{exclusion}_" in test_name:
            return False
    return True


def _get_all_tests_list(
    profile, test_root_dir, operator, exclude_negative_tests=False, include_all=False
):
    """Create test list based on tests in the test_dir."""
    test_dir = test_root_dir / operator
    if not test_dir.is_dir():
        # Tests are split into multiple dirs, for example: conv2d_1x1, conv2d_3x3
        test_dir = test_root_dir
        directories = [
            tdir for tdir in test_dir.glob("*") if tdir.name.startswith(operator)
        ]
    else:
        directories = [test_dir]

    tests = []
    for tdir in directories:
        tests.extend(
            [
                test
                for test in tdir.glob("*")
                if include_all
                or _check_to_include_test(profile, test.name, exclude_negative_tests)
            ]
        )
    return tests


def generate_results(args, operator, op_build_dir, tests=None):
    """Run tests on reference model and save result to the test directory."""
    num_cores = args.num_cores
    run_tests_cmd = "tosa_verif_run_tests"

    ref_model_path = args.ref_model_dir / LOCATION_REF_MODEL_BINARY
    ref_cmd_base = [
        run_tests_cmd,
        "--ref-model-path",
        str(ref_model_path.absolute()),
        "-j",
        str(num_cores),
        "-v",
        "-t",
    ]
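    # "-t" takes the test path, which is appended per test below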
    ref_cmds = []

    if not tests:
        # No need to run ERRORIF tests as they don't have result files
        tests = _get_all_tests_list(
            args.profile, op_build_dir, operator, exclude_negative_tests=True
        )

    for test in tests:
        ref_cmd = ref_cmd_base.copy()
        ref_cmd.append(str(test))
        ref_cmds.append(ref_cmd)

    fail_string = "UNEXPECTED_FAILURE"
    failed_counter = 0

    job_pool = mp.Pool(args.num_cores)
    sh_partial = partial(_run_sh_command, args, args.ref_model_dir.absolute())
    pool_results = job_pool.map(sh_partial, ref_cmds)
    job_pool.close()
    job_pool.join()

    # Use the output captured by _run_sh_command to work out if each test passed
    for i, rc in enumerate(pool_results):
        if fail_string in str(rc[0]):
            logger.error(f"Test {i+1}/{len(ref_cmds)}: {ref_cmds[i][-1]} failed.")
            failed_counter += 1
        else:
            logger.info(f"Test {i+1}/{len(ref_cmds)}: {ref_cmds[i][-1]} passed.")

    logger.info(f"{len(ref_cmds)-failed_counter}/{len(ref_cmds)} tests passed")
    logger.info("Ran tests on model and saved results of passing tests")


def convert_tests(
    args,
    operator,
    op_build_dir,
    output_dir,
    profiles,
    tests=None,
    group=None,
    trim_op_subdir=False,
):
    """Convert tests to JSON and save to the output directory."""
    ref_model_dir = args.ref_model_dir

    if group:
        output_dir = output_dir / group

    ref_cmd_base = ["--ref-model-directory", str(ref_model_dir)]
    for profile in profiles:
        ref_cmd_base.extend(["--profile", profile])
    if args.framework_schema:
        ref_cmd_base.extend(["--framework-schema", str(args.framework_schema)])
    ref_cmd_base.append("--output-directory")

    ref_cmds = []

    if not tests:
        tests = _get_all_tests_list(args.profile, op_build_dir, operator)
        logger.info(f"Converting all {args.profile} profile tests")

    # Controls whether the tests are copied into their operator sub-directory or not
    output_dir_relative_pos = -1 if trim_op_subdir else -2
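    # relative_to() below strips all but the last two path components (operator
    # sub-directory plus test name), or just the last one (the test name alone)
    # when trim_op_subdir is True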
    for test in tests:
        logger.info(f"Test chosen: {test}")
        ref_cmd = ref_cmd_base.copy()
        full_output_directory = output_dir / test.relative_to(
            *test.parts[:output_dir_relative_pos]
        )
        ref_cmd.append(str(full_output_directory))
        ref_cmd.append(str(test))
        ref_cmds.append(ref_cmd)

    if len(ref_cmds) == 0:
        logger.warning("No tests found. Nothing to convert")
        return

    job_pool = mp.Pool(args.num_cores)

    pool_results = job_pool.map(c2c_main, ref_cmds)
    job_pool.close()
    job_pool.join()

    failed_counter = 0
    for i, result in enumerate(pool_results):
        if result != 0:
            logger.error(
                f"test {i+1}/{len(ref_cmds)}: {ref_cmds[i][-1]} failed to convert."
            )
            failed_counter += 1
        else:
            logger.info(f"test {i+1}/{len(ref_cmds)}: {ref_cmds[i][-1]} converted")
    logger.info(
        f"{len(ref_cmds)-failed_counter}/{len(ref_cmds)} tests successfully converted"
    )

    if failed_counter > 0:
        logger.error(f"Stopping due to {failed_counter} test conversion errors")
        raise GenConformanceError()

    logger.info("Converted tests to JSON and saved to output directory")

    return output_dir


def get_op_tests_selection(args, operator, op_build_dir, test_params, negative=False):
    """Use the test picker to get a subsection of the generated tests."""
    assert operator in test_params
    try:
        op_params = test_params[operator]
        op = Operator.registry[operator](
            op_build_dir,
            op_params,
            negative,
            exclude_types=PROFILE_OPS_INFO[args.profile]["exclude_types"],
        )
    except KeyError:
        logger.error(f"{operator} operator is not supported by test_select")
        raise GenConformanceError()

    return op.select_tests()


def check_op_tests(args, operator, output_dir):
    """Move test folders that contain files larger than 30MB to a new directory."""
    destination_dir = str(args.output_dir) + "_large_files"

    tests = _get_all_tests_list(args.profile, output_dir, operator, include_all=True)
    if not tests:
        logger.error(
            f"Couldn't find any tests to size check for {operator} in {output_dir}"
        )
        raise GenConformanceError()

    for tdir in tests:
        move_dir = False
        test_files = [file for file in tdir.glob("*")]
        for file in test_files:
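            # st_size is in bytes; dividing by 1024**2 converts to MiB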
            file_size = os.stat(file).st_size / 1024**2
            if file_size > 30:
                move_dir = True

        if move_dir:
            move_destination = destination_dir / tdir.relative_to(output_dir)
            logger.warning(
                f"{tdir.relative_to(output_dir)} contains files that are too large (>30MB), test moved to new folder: {destination_dir}"
            )

            if move_destination.is_dir():
                logger.warning(
                    f"{move_destination} directory already exists, deleting existing."
                )
                shutil.rmtree(str(move_destination))
            shutil.move(str(tdir), move_destination)


def copy_rename_framework_tests(args, operator, test_picks):
    """Copy framework tests into a new folder and rename them if needed.

    The tests are renamed to match the operator name if an alternate
    (framework) name has been used instead.
    """
    framework_tests_dir = args.framework_tests_dir
    new_tests_dir = args.build_dir / "frameworks" / operator
    os.makedirs(new_tests_dir, exist_ok=True)

    # Get the framework tests operator name(s)
    if "alternate_names" in test_picks[operator]:
        alternate_names = test_picks[operator]["alternate_names"]
    else:
        alternate_names = [operator]

    # Get the alternate named test directories for the operator
    copied_count = 0
    for alt_name in alternate_names:
        test_prefix = f"test_{alt_name}"
        test_dirs = list(framework_tests_dir.glob(f"{test_prefix}_*"))

        # Copy tests to the new directory and rename to match the operator name
        # - if there is just 1 alternate name, replace the full test prefix
        #   test_add_... -> add_...
        # - if there are multiple alternate names, just replace the "test"
        #   test_concatv2_... -> concatenation_concatv2_...
        old_prefix = test_prefix if len(alternate_names) == 1 else "test"

        for tdir in test_dirs:
            new_test_name = tdir.name.replace(old_prefix, operator)
            copy_destination = new_tests_dir / new_test_name
            logger.debug(f"copying test folder {tdir} to {copy_destination}")
            copy_tree(str(tdir), str(copy_destination))
            copied_count += 1

    logger.info(f"Copied and renamed {copied_count} framework test folders")
    return new_tests_dir.parent


def get_framework_tests_selection(args, operator, test_picks, op_build_dir):
    """Get the list of pre-chosen tests with relative paths."""
    try:
        tests = test_picks[operator]["tests"]
    except KeyError:
        logger.error(f"Framework test selection not defined for {operator} operator")
        raise GenConformanceError()

    test_paths = [op_build_dir / operator / test for test in tests]
    return test_paths


def parse_args(argv=None):
    """Parse the arguments."""
    parser = argparse.ArgumentParser()
    profiles = list(PROFILE_OPS_INFO.keys())
    parser.add_argument(
        "--profile",
        dest="profile",
        choices=profiles,
        default=profiles[0],
        type=str,
        help=f"TOSA profile (default is {profiles[0]})",
    )
    parser.add_argument(
        "--operators",
        type=str,
        nargs="*",
        help="The operator(s) to create tests for, if not supplied all tests will be created",
    )
    parser.add_argument(
        "--unit-tests",
        dest="unit_tests",
        choices=["operator", "framework", "both"],
        default="operator",
        type=str,
        help="Which unit tests are produced (default is operator)",
    )
    parser.add_argument(
        "--test-type",
        dest="test_type",
        choices=["positive", "negative", "both"],
        default="both",
        type=str,
        help="Type of tests produced (default is both)",
    )
    parser.add_argument(
        "--ref-model-directory",
        dest="ref_model_dir",
        type=Path,
        required=True,
        help="Reference Model directory (must be pre-built)",
    )
    parser.add_argument(
        "--seed",
        dest="random_seed",
        default=DEFAULT_SEED,
        type=int,
        help="Random test seed",
    )
    parser.add_argument(
        "--framework-tests-directory",
        dest="framework_tests_dir",
        type=Path,
        default=Path.cwd() / "tests",
        help="The pre-built framework tests directory (default is tests)",
    )
    parser.add_argument(
        "--framework-schema",
        dest="framework_schema",
        type=Path,
        help="Framework flatbuffers schema needed to convert framework models",
    )
    parser.add_argument(
        "--build-directory",
        dest="build_dir",
        type=Path,
        default=Path.cwd() / "conformance_build",
        help="Temporary build directory for files created during this process (default is conformance_build)",
    )
    parser.add_argument(
        "--output-directory",
        dest="output_dir",
        type=Path,
        default=Path.cwd() / "conformance",
        help="Output directory (default is conformance)",
    )
    script_dir = Path(__file__).parent.absolute()
    parser.add_argument(
        "--test-param-json-directory",
        dest="param_json_dir",
        type=Path,
        default=script_dir,
        help=f"Test parameters (ops info) JSON file directory (default is {script_dir})",
    )
    parser.add_argument(
        "--convert-all-tests",
        action="store_true",
        help="Converts all tests instead of those picked by test_select",
    )
    parser.add_argument(
        "--keep-large-files",
        action="store_true",
        help="Keeps tests that contain files larger than 30MB in output directory",
    )
    parser.add_argument(
        "--capture-output",
        action="store_true",
        help="Prints output of running sh commands",
    )
    parser.add_argument(
        "-j",
        dest="num_cores",
        type=int,
        default=6,
        help="Number of simultaneous jobs to split the tasks into for multiprocessing",
    )
    parser.add_argument(
        "-v",
        dest="verbosity",
        action="count",
        default=0,
        help="Verbosity (can be used multiple times for more details)",
    )
    args = parser.parse_args(argv)

    return args


def main():
    args = parse_args()

    if not args.ref_model_dir.is_dir():
        logger.error(
            f"Missing or invalid reference model directory: {args.ref_model_dir}"
        )
        return 2
    else:
        ref_model = args.ref_model_dir / LOCATION_REF_MODEL_BINARY
        if not ref_model.is_file():
            logger.error(
                f"{LOCATION_REF_MODEL_BINARY} not found in {args.ref_model_dir}\nHave you built the reference model?"
            )
            return 2
    if args.unit_tests in ["framework", "both"]:
        if not args.framework_schema:
            logger.error(
                "Need to supply location of Framework flatbuffers schema via --framework-schema"
            )
            return 2
        if not args.framework_tests_dir.is_dir():
            logger.error(
                f"Missing or invalid framework tests directory: {args.framework_tests_dir}"
            )
            return 2

    loglevels = (logging.WARNING, logging.INFO, logging.DEBUG)
    loglevel = loglevels[min(args.verbosity, len(loglevels) - 1)]
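    # Default is WARNING; -v selects INFO and -vv (or more) selects DEBUG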
    logger.setLevel(loglevel)
    # Set other loggers the same
    logging.getLogger("test_select").setLevel(loglevel)
    logging.getLogger("convert2conformance").setLevel(loglevel)

    print(f"Creating conformance tests for TOSA {args.profile} profile")
    print(f"Output directory: {args.output_dir}")

    if args.random_seed != DEFAULT_SEED:
        logger.warning(
            "Random test seed changed from default, tests will not match official conformance"
        )

    args.build_dir = args.build_dir.resolve()
    logger.debug(f"Creating build directory: {args.build_dir}")
    args.build_dir.mkdir(parents=True, exist_ok=True)

    try:
        # Framework unit tests
        if args.unit_tests in ["framework", "both"]:
            logger.debug("Creating FRAMEWORK unit tests")
            test_picks_file = (
                args.param_json_dir / PROFILE_OPS_INFO[args.profile]["framework_tests"]
            )
            try:
                with open(test_picks_file, "r") as fd:
                    test_picks = json.load(fd)
            except Exception as e:
                logger.error(
                    f"Couldn't load framework tests info - {test_picks_file}: {e}"
                )
                return 1

            operators = args.operators
            if not operators:
                # Create tests for all the operators
                operators = list(test_picks.keys())

            root_output_dir = args.output_dir / "frameworks" / "tflite" / "operators"
            for op in operators:
                if op not in test_picks:
                    logger.warning(
                        f"Framework op {op} not found in {test_picks_file} - skipping"
                    )
                    continue

                logger.debug(f"Copying and renaming {op}")
                framework_test_dir = copy_rename_framework_tests(args, op, test_picks)
                profiles = test_picks[op]["profile"]
                if args.convert_all_tests:
                    logger.debug("Running and converting all framework tests")
                    convert_tests(
                        args,
                        op,
                        framework_test_dir,
                        root_output_dir,
                        profiles,
                        trim_op_subdir=True,
                    )
                else:
                    framework_tests = get_framework_tests_selection(
                        args, op, test_picks, framework_test_dir
                    )
                    convert_tests(
                        args,
                        op,
                        framework_test_dir,
                        root_output_dir,
                        profiles,
                        tests=framework_tests,
                        trim_op_subdir=True,
                    )

        # Operator unit tests
        if args.unit_tests in ["operator", "both"]:
            logger.debug("Creating OPERATOR unit tests")
            test_params_file = (
                args.param_json_dir
                / PROFILE_OPS_INFO[args.profile]["operator_test_params"]
            )
            try:
                with open(test_params_file, "r") as fd:
                    test_params = json.load(fd)
            except Exception as e:
                logger.error(
                    f"Couldn't load operator test params - {test_params_file}: {e}"
                )
                return 1

            operators = args.operators
            if not operators:
                # Create tests for all the operators
                operators = list(test_params.keys())

            for op in operators:
                if op not in test_params:
                    logger.warning(
                        f"{op} operator parameters not found in {test_params_file} - skipping"
                    )
                    continue

                if (
                    args.test_type == "negative"
                    and "no_negative_tests" in test_params[op]
                    and test_params[op]["no_negative_tests"]
                ):
                    logger.warning(f"No negative tests for {op}")
                    continue

                op_build_dir = build_op_tests(args, op, test_params)

                operator_group = test_params[op]["group"]
                root_output_dir = args.output_dir / "operators"
                profiles = test_params[op]["profile"]
                if args.convert_all_tests:
                    logger.debug(f"Running and converting all {op} tests")
                    generate_results(args, op, op_build_dir)
                    output_dir = convert_tests(
                        args,
                        op,
                        op_build_dir,
                        root_output_dir,
                        profiles,
                        group=operator_group,
                    )
                else:
                    if args.test_type in ["positive", "both"]:
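                        # tee() duplicates the selection generator so it can be
                        # consumed twice: once to run the tests on the reference
                        # model, and once to convert the chosen tests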
                        tests_gen1, tests_gen2 = tee(
                            get_op_tests_selection(args, op, op_build_dir, test_params)
                        )
                        generate_results(args, op, op_build_dir, tests_gen1)
                        output_dir = convert_tests(
                            args,
                            op,
                            op_build_dir,
                            root_output_dir,
                            profiles,
                            tests=tests_gen2,
                            group=operator_group,
                        )
                    if args.test_type in ["negative", "both"] and (
                        "no_negative_tests" not in test_params[op]
                        or not test_params[op]["no_negative_tests"]
                    ):
                        negative_tests = get_op_tests_selection(
                            args, op, op_build_dir, test_params, negative=True
                        )
                        output_dir = convert_tests(
                            args,
                            op,
                            op_build_dir,
                            root_output_dir,
                            profiles,
                            tests=negative_tests,
                            group=operator_group,
                        )
                if not args.keep_large_files:
                    check_op_tests(args, op, output_dir)
    except GenConformanceError:
        return 1

    return 0


if __name__ == "__main__":
    exit(main())