#! /usr/bin/env python3
#
# Copyright 2020 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
'''
Script that re-captures the traces in the restricted trace folder. We can
use this to update traces without needing to re-run the app on a device.
'''

import argparse
import fnmatch
import json
import logging
import os
import pathlib
import shutil
import stat
import subprocess
import sys
import tempfile
import time

from difflib import unified_diff
from gen_restricted_traces import read_json as read_json, write_json as write_json
from pathlib import Path

SCRIPT_DIR = str(pathlib.Path(__file__).resolve().parent)
PY_UTILS = str(pathlib.Path(SCRIPT_DIR) / '..' / 'py_utils')
if PY_UTILS not in sys.path:
    os.stat(PY_UTILS) and sys.path.insert(0, PY_UTILS)
import android_helper
import angle_test_util

DEFAULT_TEST_SUITE = angle_test_util.ANGLE_TRACE_TEST_SUITE
DEFAULT_TEST_JSON = 'restricted_traces.json'
DEFAULT_LOG_LEVEL = 'info'
DEFAULT_BACKUP_FOLDER = 'retrace-backups'

EXIT_SUCCESS = 0
EXIT_FAILURE = 1

# Test expectations
FAIL = 'FAIL'
PASS = 'PASS'
SKIP = 'SKIP'


def get_trace_json_path(trace):
    return os.path.join(get_script_dir(), trace, f'{trace}.json')


def load_trace_json(trace):
    json_file_name = get_trace_json_path(trace)
    return read_json(json_file_name)


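# The per-trace JSON loaded by load_trace_json() is expected to contain at least
# the fields accessed by the helpers below; an illustrative (not real) example:
#   {
#     "WindowSurfaceContextID": 1,
#     "RequiredExtensions": ["GL_EXT_example"],          # optional
#     "TraceMetadata": {
#       "ContextClientMajorVersion": 3,
#       "ContextClientMinorVersion": 1,
#       "FrameStart": 2,
#       "FrameEnd": 11
#     }
#   }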
def get_context(trace):
    """Returns the trace context number."""
    json_data = load_trace_json(trace)
    return str(json_data['WindowSurfaceContextID'])


def get_script_dir():
    return os.path.dirname(sys.argv[0])


def context_header(trace, trace_path):
    context_id = get_context(trace)
    header = '%s_context%s.h' % (trace, context_id)
    return os.path.join(trace_path, header)


def src_trace_path(trace):
    return os.path.join(get_script_dir(), trace)


def get_num_frames(json_data):
    metadata = json_data['TraceMetadata']
    return metadata['FrameEnd'] - metadata['FrameStart'] + 1


def get_gles_version(json_data):
    metadata = json_data['TraceMetadata']
    return (metadata['ContextClientMajorVersion'], metadata['ContextClientMinorVersion'])


def set_gles_version(json_data, version):
    metadata = json_data['TraceMetadata']
    metadata['ContextClientMajorVersion'] = version[0]
    metadata['ContextClientMinorVersion'] = version[1]


def save_trace_json(trace, data):
    json_file_name = get_trace_json_path(trace)
    return write_json(json_file_name, data)


def path_contains_header(path):
    if not os.path.isdir(path):
        return False
    for file in os.listdir(path):
        if fnmatch.fnmatch(file, '*.h'):
            return True
    return False


def chmod_directory(directory, perm):
    assert os.path.isdir(directory)
    for file in os.listdir(directory):
        fn = os.path.join(directory, file)
        os.chmod(fn, perm)


def ensure_rmdir(directory):
    if os.path.isdir(directory):
        chmod_directory(directory, stat.S_IWRITE)
        shutil.rmtree(directory)


def copy_trace_folder(old_path, new_path):
    logging.info('%s -> %s' % (old_path, new_path))
    ensure_rmdir(new_path)
    shutil.copytree(old_path, new_path)


def touch_trace_folder(trace_path):
    for file in os.listdir(trace_path):
        (Path(trace_path) / file).touch()


def backup_single_trace(trace, backup_path):
    trace_path = src_trace_path(trace)
    trace_backup_path = os.path.join(backup_path, trace)
    copy_trace_folder(trace_path, trace_backup_path)


def backup_traces(args, traces):
    for trace in angle_test_util.FilterTests(traces, args.traces):
        backup_single_trace(trace, args.out_path)


def restore_single_trace(trace, backup_path):
    trace_path = src_trace_path(trace)
    trace_backup_path = os.path.join(backup_path, trace)
    if not os.path.isdir(trace_backup_path):
        logging.error('Trace folder not found at %s' % trace_backup_path)
        return False
    else:
        copy_trace_folder(trace_backup_path, trace_path)
        touch_trace_folder(trace_path)
        return True


def restore_traces(args, traces):
    for trace in angle_test_util.FilterTests(traces, args.traces):
        restore_single_trace(trace, args.out_path)


def run_autoninja(args):
    autoninja_binary = 'autoninja'
    if os.name == 'nt':
        autoninja_binary += '.bat'

    autoninja_args = [autoninja_binary, '-C', args.gn_path, args.test_suite]
    logging.debug('Calling %s' % ' '.join(autoninja_args))
    if args.show_test_stdout:
        subprocess.run(autoninja_args, check=True)
    else:
        subprocess.check_output(autoninja_args)


def run_test_suite(args, trace_binary, trace, max_steps, additional_args, additional_env):
    run_args = [
        angle_test_util.ExecutablePathInCurrentDir(trace_binary),
        '--gtest_filter=TraceTest.%s' % trace,
        '--max-steps-performed',
        str(max_steps),
    ] + additional_args
    if not args.no_swiftshader:
        run_args += ['--use-angle=swiftshader']

    env = {**os.environ.copy(), **additional_env}
    env_string = ' '.join(['%s=%s' % item for item in additional_env.items()])
    if env_string:
        env_string += ' '

    logging.info('%s%s' % (env_string, ' '.join(run_args)))
    p = subprocess.run(run_args, env=env, capture_output=True, check=True)
    if args.show_test_stdout:
        logging.info('Test stdout:\n%s' % p.stdout.decode())


def upgrade_single_trace(args, trace_binary, trace, out_path, no_overwrite, c_sources):
    logging.debug('Tracing %s' % trace)

    trace_path = os.path.abspath(os.path.join(out_path, trace))
    if no_overwrite and path_contains_header(trace_path):
        logging.info('Skipping "%s" because the out folder already exists' % trace)
        return

    json_data = load_trace_json(trace)
    num_frames = get_num_frames(json_data)

    metadata = json_data['TraceMetadata']
    logging.debug('Read metadata: %s' % str(metadata))

    max_steps = min(args.limit, num_frames) if args.limit else num_frames

    # We start tracing from frame 2. --retrace-mode issues a Swap() after Setup() so we can
    # accurately re-trace the MEC (mid-execution capture).
    additional_env = {
        'ANGLE_CAPTURE_LABEL': trace,
        'ANGLE_CAPTURE_OUT_DIR': trace_path,
        'ANGLE_CAPTURE_FRAME_START': '2',
        'ANGLE_CAPTURE_FRAME_END': str(max_steps + 1),
    }
    if args.validation:
        additional_env['ANGLE_CAPTURE_VALIDATION'] = '1'
        # Also turn on shader output init to ensure we have no undefined values.
        # This feature is also enabled in replay when using --validation.
        additional_env[
            'ANGLE_FEATURE_OVERRIDES_ENABLED'] = 'allocateNonZeroMemory:forceInitShaderVariables'
    if args.validation_expr:
        additional_env['ANGLE_CAPTURE_VALIDATION_EXPR'] = args.validation_expr
    # TODO: Remove when default. http://anglebug.com/42266223
    if c_sources:
        additional_env['ANGLE_CAPTURE_SOURCE_EXT'] = 'c'

    additional_args = ['--retrace-mode']

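    # For reference, run_test_suite() below assembles roughly the following
    # invocation (illustrative only; <test_suite> is args.test_suite, and
    # swiftshader is used unless --no-swiftshader was passed):
    #   ANGLE_CAPTURE_LABEL=<trace> ANGLE_CAPTURE_OUT_DIR=<trace_path> \
    #   ANGLE_CAPTURE_FRAME_START=2 ANGLE_CAPTURE_FRAME_END=<max_steps+1> \
    #   <test_suite> --gtest_filter=TraceTest.<trace> \
    #   --max-steps-performed <max_steps> --retrace-mode --use-angle=swiftshader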
    try:
        if not os.path.isdir(trace_path):
            os.makedirs(trace_path)

        run_test_suite(args, trace_binary, trace, max_steps, additional_args, additional_env)

        json_file = "{}/{}.json".format(trace_path, trace)
        if not os.path.exists(json_file):
            logging.error(
                f'There was a problem tracing "{trace}", could not find json file: {json_file}')
            return False

        # Copy over the list obtained by get_min_reqs if present
        if 'RequiredExtensions' in json_data:
            new_data = read_json(json_file)
            new_data['RequiredExtensions'] = json_data['RequiredExtensions']
            write_json(json_file, new_data)

    except subprocess.CalledProcessError as e:
        logging.exception('There was an exception running "%s":\n%s' % (trace, e.output.decode()))
        return False

    return True


def upgrade_traces(args, traces):
    run_autoninja(args)
    trace_binary = os.path.join(args.gn_path, args.test_suite)

    failures = []

    for trace in angle_test_util.FilterTests(traces, args.traces):
        if not upgrade_single_trace(args, trace_binary, trace, args.out_path, args.no_overwrite,
                                    args.c_sources):
            failures += [trace]

    if failures:
        print('The following traces failed to upgrade:\n')
        print('\n'.join([' ' + trace for trace in failures]))
        return EXIT_FAILURE

    return EXIT_SUCCESS


def validate_single_trace(args, trace_binary, trace, additional_args, additional_env):
    json_data = load_trace_json(trace)
    num_frames = get_num_frames(json_data)
    max_steps = min(args.limit, num_frames) if args.limit else num_frames
    try:
        run_test_suite(args, trace_binary, trace, max_steps, additional_args, additional_env)
    except subprocess.CalledProcessError as e:
        logging.error('There was a failure running "%s":\n%s' % (trace, e.output.decode()))
        return False
    return True


def validate_traces(args, traces):
    restore_traces(args, traces)
    run_autoninja(args)

    additional_args = ['--validation']
    additional_env = {
        'ANGLE_FEATURE_OVERRIDES_ENABLED': 'allocateNonZeroMemory:forceInitShaderVariables'
    }

    failures = []
    trace_binary = os.path.join(args.gn_path, args.test_suite)

    for trace in angle_test_util.FilterTests(traces, args.traces):
        if not validate_single_trace(args, trace_binary, trace, additional_args, additional_env):
            failures += [trace]

    if failures:
        print('The following traces failed to validate:\n')
        print('\n'.join([' ' + trace for trace in failures]))
        return EXIT_FAILURE

    return EXIT_SUCCESS


def interpret_traces(args, traces):
    test_name = 'angle_trace_interpreter_tests'
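    # The results dict below is laid out to follow the Chromium JSON Test Results
    # Format (version 3), so it can be written out via --test-output /
    # --isolated-script-test-output and consumed by the usual results tooling.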
    results = {
        'tests': {
            test_name: {}
        },
        'interrupted': False,
        'seconds_since_epoch': time.time(),
        'path_delimiter': '.',
        'version': 3,
        'num_failures_by_type': {
            FAIL: 0,
            PASS: 0,
            SKIP: 0,
        },
    }

    if args.path:
        trace_binary = os.path.join(args.path, args.test_suite)
    else:
        trace_binary = args.test_suite

    for trace in angle_test_util.FilterTests(traces, args.traces):
        with tempfile.TemporaryDirectory() as backup_path:
            backup_single_trace(trace, backup_path)
            result = FAIL
            try:
                with tempfile.TemporaryDirectory() as out_path:
                    logging.debug('Using temporary path %s.' % out_path)
                    if upgrade_single_trace(args, trace_binary, trace, out_path, False, True):
                        if restore_single_trace(trace, out_path):
                            validate_args = ['--trace-interpreter=c']
                            if args.verbose:
                                validate_args += ['--verbose-logging']
                            if validate_single_trace(args, trace_binary, trace, validate_args, {}):
                                logging.info('%s passed!' % trace)
                                result = PASS
            finally:
                restore_single_trace(trace, backup_path)
                results['num_failures_by_type'][result] += 1
                results['tests'][test_name][trace] = {'expected': PASS, 'actual': result}

    if results['num_failures_by_type'][FAIL]:
        logging.error('Some tests failed.')
        return EXIT_FAILURE

    if results['num_failures_by_type'][PASS] == 0:
        logging.error('No tests ran. Please check your command line arguments.')
        return EXIT_FAILURE

    if args.test_output:
        with open(args.test_output, 'w') as out_file:
            out_file.write(json.dumps(results, indent=2))

    return EXIT_SUCCESS


def add_upgrade_args(parser):
    parser.add_argument(
        '--validation', help='Enable state serialization validation calls.', action='store_true')
    parser.add_argument(
        '--validation-expr',
        help='Validation expression, used to add more validation checkpoints.')
    parser.add_argument(
        '-L',
        '--limit',
        '--frame-limit',
        type=int,
        help='Limits the number of captured frames to produce a shorter trace than the original.')


def get_min_reqs(args, traces):
    run_autoninja(args)

    env = {}
    # List of extensions that implicitly enable *other* extensions
    extension_deny_list = [
        'GL_ANGLE_shader_pixel_local_storage', 'GL_ANGLE_shader_pixel_local_storage_coherent'
    ]
    # List of extensions which de facto imply others. The implied extensions are removed
    # from the RequiredExtensions list for wider platform support: http://anglebug.com/380026310
    implied_extension_filter = [("GL_OES_compressed_ETC1_RGB8_texture",
                                 "GL_EXT_compressed_ETC1_RGB8_sub_texture")]
    default_args = ["--no-warmup"]

    skipped_traces = []
    trace_binary = os.path.join(args.gn_path, args.test_suite)

    for trace in angle_test_util.FilterTests(traces, args.traces):
        print(f"Finding requirements for {trace}")
        extensions = []
        json_data = load_trace_json(trace)
        original_json_data = json.dumps(json_data, sort_keys=True, indent=4)
        max_steps = get_num_frames(json_data)

        # exts: a list of extensions to use with run_test_suite. If empty,
        # then run_test_suite runs with all extensions enabled by default.
        def run_test_suite_with_exts(exts):
            additional_args = default_args.copy()
            if len(exts) > 0:
                additional_args += ['--request-extensions', ' '.join(exts)]

            try:
                run_test_suite(args, trace_binary, trace, max_steps, additional_args, env)
            except subprocess.CalledProcessError as error:
                return False
            return True

        original_gles_version = get_gles_version(json_data)
        original_extensions = None if 'RequiredExtensions' not in json_data else json_data[
            'RequiredExtensions']

        def restore_trace():
            if original_extensions is not None:
                json_data['RequiredExtensions'] = original_extensions
            set_gles_version(json_data, original_gles_version)
            save_trace_json(trace, json_data)

        try:
            # Use the highest GLES version we have and empty the required
            # extensions so that previous data doesn't affect the current
            # run.
            json_data['RequiredExtensions'] = []
            save_trace_json(trace, json_data)
            if not run_test_suite_with_exts([]):
                skipped_traces.append(
                    (trace, "Fails to run in default configuration on this machine"))
                restore_trace()
                continue

            # Find minimum GLES version.
            gles_versions = [(1, 0), (1, 1), (2, 0), (3, 0), (3, 1), (3, 2)]
            min_version = None
            for idx in range(len(gles_versions)):
                min_version = gles_versions[idx]
                set_gles_version(json_data, min_version)
                save_trace_json(trace, json_data)
                try:
                    run_test_suite(args, trace_binary, trace, max_steps, default_args, env)
                except subprocess.CalledProcessError as error:
                    continue
                break

            # Get the list of requestable extensions for the GLES version.
            try:
                # Get the list of requestable extensions
                with tempfile.NamedTemporaryFile() as tmp:
                    # Some operating systems will not allow a file to be open for writing
                    # by multiple processes. So close the temp file we just made before
                    # running the test suite.
                    tmp.close()
                    additional_args = ["--print-extensions-to-file", tmp.name]
                    run_test_suite(args, trace_binary, trace, max_steps, additional_args, env)
                    with open(tmp.name) as f:
                        for line in f:
                            if line.strip() not in extension_deny_list:
                                extensions.append(line.strip())
            except Exception:
                skipped_traces.append(
                    (trace, "Failed to read extension list, likely that test is skipped"))
                restore_trace()
                continue

            if len(extensions) > 0 and not run_test_suite_with_exts(extensions):
                skipped_traces.append((trace, "Requesting all extensions results in test failure"))
                restore_trace()
                continue

            # Reset RequiredExtensions so it doesn't interfere with our search
            json_data['RequiredExtensions'] = []
            save_trace_json(trace, json_data)

            # Use a divide and conquer strategy to find the required extensions.
            # Max depth is log(N) where N is the number of extensions. Expected
            # runtime is p*log(N), where p is the number of required extensions.
            # Assume a single possible solution - see 'extension_deny_list'.
            # others: A list that contains one or more required extensions,
            #     but is not actively being searched
            # exts: The list of extensions actively being searched
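            # Illustrative walk-through (hypothetical extension names, not from a
            # real trace): with exts = [A, B, C, D] and only C required, the first
            # split runs with [A, B] (fails, C is missing) and with [C, D] (passes),
            # so the search recurses into [C, D]; there the run with just [C]
            # passes and [C] is returned as the required set.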
            def recurse_run(others, exts, depth=0):
                if len(exts) <= 1:
                    return exts
                middle = int(len(exts) / 2)
                left_partition = exts[:middle]
                right_partition = exts[middle:]
                left_passed = run_test_suite_with_exts(others + left_partition)

                if depth > 0 and left_passed:
                    # We know right_passed must be False because one stack frame up,
                    # run_test_suite(exts) returned False.
                    return recurse_run(others, left_partition)

                right_passed = run_test_suite_with_exts(others + right_partition)
                if left_passed and right_passed:
                    # Neither left nor right contain necessary extensions
                    return []
                elif left_passed:
                    # Only left contains necessary extensions
                    return recurse_run(others, left_partition, depth + 1)
                elif right_passed:
                    # Only right contains necessary extensions
                    return recurse_run(others, right_partition, depth + 1)
                else:
                    # Both left and right contain necessary extensions
                    left_reqs = recurse_run(others + right_partition, left_partition, depth + 1)
                    right_reqs = recurse_run(others + left_reqs, right_partition, depth + 1)
                    return left_reqs + right_reqs

            recurse_reqs = recurse_run([], extensions, 0)
            # Handle extensions which de facto imply others. Check both entries
            # so remove() cannot raise if the implied extension was not found.
            for extension in implied_extension_filter:
                if extension[0] in recurse_reqs and extension[1] in recurse_reqs:
                    recurse_reqs.remove(extension[1])

            json_data['RequiredExtensions'] = recurse_reqs
            save_trace_json(trace, json_data)

            # Output json file diff
            min_reqs_json_data = json.dumps(json_data, sort_keys=True, indent=4)
            if original_json_data == min_reqs_json_data:
                print(f"\nNo changes made to {trace}.json")
            else:
                json_diff = unified_diff(
                    original_json_data.splitlines(), min_reqs_json_data.splitlines(), lineterm='')
                print(f"\nGet Min Requirements modifications to {trace}.json:")
                print('\n'.join(list(json_diff)))
        except BaseException as e:
            restore_trace()
            raise e

    if skipped_traces:
        print("Finished get_min_reqs, skipped traces:")
        for trace, reason in skipped_traces:
            print(f"\t{trace}: {reason}")
    else:
        print("Finished get_min_reqs for all traces specified")


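# Typical invocations (illustrative; build and output paths are placeholders):
#   ./retrace_restricted_traces.py backup "*"
#   ./retrace_restricted_traces.py upgrade <gn_path> <out_path>
#   ./retrace_restricted_traces.py validate <gn_path> <out_path> "*"
#   ./retrace_restricted_traces.py get_min_reqs <gn_path> --traces "*"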
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--log', help='Logging level.', default=DEFAULT_LOG_LEVEL)
    parser.add_argument(
        '--test-suite',
        help='Test Suite. Default is %s' % DEFAULT_TEST_SUITE,
        default=DEFAULT_TEST_SUITE)
    parser.add_argument(
        '--no-swiftshader',
        help='Trace against native Vulkan.',
        action='store_true',
        default=False)
    parser.add_argument(
        '--test-output', '--isolated-script-test-output', help='Where to write results JSON.')

    subparsers = parser.add_subparsers(dest='command', required=True, help='Command to run.')

    backup_parser = subparsers.add_parser(
        'backup', help='Copies trace contents into a saved folder.')
    backup_parser.add_argument(
        'traces', help='Traces to back up. Supports fnmatch expressions.', default='*')
    backup_parser.add_argument(
        '-o',
        '--out-path',
        '--backup-path',
        help='Destination folder. Default is "%s".' % DEFAULT_BACKUP_FOLDER,
        default=DEFAULT_BACKUP_FOLDER)

    restore_parser = subparsers.add_parser(
        'restore', help='Copies traces from a saved folder to the trace folder.')
    restore_parser.add_argument(
        '-o',
        '--out-path',
        '--backup-path',
        help='Path where the traces were saved. Default is "%s".' % DEFAULT_BACKUP_FOLDER,
        default=DEFAULT_BACKUP_FOLDER)
    restore_parser.add_argument(
        'traces', help='Traces to restore. Supports fnmatch expressions.', default='*')

    upgrade_parser = subparsers.add_parser(
        'upgrade', help='Re-trace existing traces, upgrading the format.')
    upgrade_parser.add_argument('gn_path', help='GN build path')
    upgrade_parser.add_argument('out_path', help='Output directory')
    upgrade_parser.add_argument(
        '-f', '--traces', '--filter', help='Trace filter. Defaults to all.', default='*')
    upgrade_parser.add_argument(
        '-n',
        '--no-overwrite',
        help='Skip traces which already exist in the out directory.',
        action='store_true')
    upgrade_parser.add_argument(
        '-c', '--c-sources', help='Output to c sources instead of cpp.', action='store_true')
    add_upgrade_args(upgrade_parser)
    upgrade_parser.add_argument(
        '--show-test-stdout', help='Log test output.', action='store_true', default=False)

    validate_parser = subparsers.add_parser(
        'validate', help='Runs an updated test suite with validation enabled.')
    validate_parser.add_argument('gn_path', help='GN build path')
    validate_parser.add_argument('out_path', help='Path to the upgraded trace folder.')
    validate_parser.add_argument(
        'traces', help='Traces to validate. Supports fnmatch expressions.', default='*')
    validate_parser.add_argument(
        '-L', '--limit', '--frame-limit', type=int, help='Limits the number of tested frames.')
    validate_parser.add_argument(
        '--show-test-stdout', help='Log test output.', action='store_true', default=False)

    interpret_parser = subparsers.add_parser(
        'interpret', help='Complete trace interpreter self-test.')
    interpret_parser.add_argument(
        '-p', '--path', help='Path to trace executable. Default: look in CWD.')
    interpret_parser.add_argument(
        'traces', help='Traces to test. Supports fnmatch expressions.', default='*')
    add_upgrade_args(interpret_parser)
    interpret_parser.add_argument(
        '--show-test-stdout', help='Log test output.', action='store_true', default=False)
    interpret_parser.add_argument(
        '-v',
        '--verbose',
        help='Verbose logging in the trace tests.',
        action='store_true',
        default=False)

    get_min_reqs_parser = subparsers.add_parser(
        'get_min_reqs',
        help='Finds the minimum required extensions for a trace to successfully run.')
    get_min_reqs_parser.add_argument('gn_path', help='GN build path')
    get_min_reqs_parser.add_argument(
        '--traces',
        help='Traces to get minimum requirements for. Supports fnmatch expressions.',
        default='*')
    get_min_reqs_parser.add_argument(
        '--show-test-stdout', help='Log test output.', action='store_true', default=False)

    args, extra_flags = parser.parse_known_args()

    logging.basicConfig(level=args.log.upper())

    # Load trace names
    with open(os.path.join(get_script_dir(), DEFAULT_TEST_JSON)) as f:
        traces = json.loads(f.read())

    traces = [trace.split(' ')[0] for trace in traces['traces']]

    try:
        if args.command == 'backup':
            return backup_traces(args, traces)
        elif args.command == 'restore':
            return restore_traces(args, traces)
        elif args.command == 'upgrade':
            return upgrade_traces(args, traces)
        elif args.command == 'validate':
            return validate_traces(args, traces)
        elif args.command == 'interpret':
            return interpret_traces(args, traces)
        elif args.command == 'get_min_reqs':
            return get_min_reqs(args, traces)
        else:
            logging.fatal('Unknown command: %s' % args.command)
            return EXIT_FAILURE
    except subprocess.CalledProcessError as e:
        if args.show_test_stdout:
            logging.exception('There was an exception running "%s"' % traces)
        else:
            logging.exception('There was an exception running "%s": %s' %
                              (traces, e.output.decode()))

        return EXIT_FAILURE


if __name__ == '__main__':
    sys.exit(main())