#!/usr/bin/python
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


"""Parses and displays the contents of one or more autoserv result directories.

This script parses the contents of one or more autoserv results folders and
generates test reports.
"""

import datetime
import glob
import logging
import operator
import optparse
import os
import re
import sys

import common
try:
    # Ensure the chromite site-package is installed.
    from chromite.lib import terminal
except ImportError:
    import subprocess
    build_externals_path = os.path.join(
        os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
        'utils', 'build_externals.py')
    subprocess.check_call([build_externals_path, 'chromiterepo'])
    # Restart the script so python now finds the autotest site-packages.
    sys.exit(os.execv(__file__, sys.argv))


_STDOUT_IS_TTY = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()


def Die(message_format, *args, **kwargs):
    """Log a message and kill the current process.

    @param message_format: string for logging.error.

    """
    logging.error(message_format, *args, **kwargs)
    sys.exit(1)


class CrashWaiver:
    """Represents a crash that we want to ignore for now."""
    def __init__(self, signals, deadline, url, person):
        self.signals = signals
        self.deadline = datetime.datetime.strptime(deadline, '%Y-%b-%d')
        self.issue_url = url
        self.suppressor = person

# List of crashes which are okay to ignore. This list should almost always be
# empty. If you add an entry, include the bug URL and your name, something
# like:
#     'crashy': CrashWaiver(
#         ['sig 11'], '2011-Aug-18', 'http://crosbug/123456', 'developer'),

_CRASH_WHITELIST = {
}


class ResultCollector(object):
    """Collects status and performance data from an autoserv results dir."""

    def __init__(self, collect_perf=True, collect_attr=False,
                 collect_info=False, escape_error=False,
                 whitelist_chrome_crashes=False):
        """Initializes the ResultCollector class.

        @param collect_perf: Should perf keyvals be collected?
        @param collect_attr: Should attr keyvals be collected?
        @param collect_info: Should info keyvals be collected?
        @param escape_error: Escape error message text for tools.
        @param whitelist_chrome_crashes: Treat Chrome crashes as non-fatal.

        """
        self._collect_perf = collect_perf
        self._collect_attr = collect_attr
        self._collect_info = collect_info
        self._escape_error = escape_error
        self._whitelist_chrome_crashes = whitelist_chrome_crashes

    def _CollectPerf(self, testdir):
        """Parses the keyval file under testdir and returns perf keyval pairs.

        @param testdir: autoserv test result directory path.

        @return dict of perf keyval pairs.

        """
        if not self._collect_perf:
            return {}
        return self._CollectKeyval(testdir, 'perf')

    def _CollectAttr(self, testdir):
        """Parses the keyval file under testdir and returns attr keyval pairs.

        @param testdir: autoserv test result directory path.

        @return dict of attr keyval pairs.

        """
        if not self._collect_attr:
            return {}
        return self._CollectKeyval(testdir, 'attr')

    def _CollectKeyval(self, testdir, keyword):
        """Parses the keyval file under testdir.
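
        Keyval lines have the form 'key{<keyword>}=value', where <keyword> is
        'perf' or 'attr'. For example, a perf entry might look like the
        following (the key and value shown are illustrative only):

            seconds_kernel_to_login{perf}=4.5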

        If testdir contains a results folder, process the keyval file and
        return a dictionary of perf keyval pairs.

        @param testdir: The autoserv test result directory.
        @param keyword: The keyword of keyval, either 'perf' or 'attr'.

        @return If the perf option is disabled or there's no keyval file
                under testdir, returns an empty dictionary. Otherwise, returns
                a dictionary of parsed keyvals. Duplicate keys are uniquified
                by their instance number.

        """
        keyval = {}
        keyval_file = os.path.join(testdir, 'results', 'keyval')
        if not os.path.isfile(keyval_file):
            return keyval

        instances = {}

        for line in open(keyval_file):
            match = re.search(r'^(.+){%s}=(.+)$' % keyword, line)
            if match:
                key = match.group(1)
                val = match.group(2)

                # If the same key name was generated multiple times, uniquify
                # all instances other than the first one by adding the
                # instance count to the key name.
                key_inst = key
                instance = instances.get(key, 0)
                if instance:
                    key_inst = '%s{%d}' % (key, instance)
                instances[key] = instance + 1

                keyval[key_inst] = val

        return keyval

    def _CollectCrashes(self, status_raw):
        """Parses the status_raw text for crashes.

        Saves crash details if crashes are discovered. Crashes covered by an
        unexpired waiver in _CRASH_WHITELIST, or Chrome crashes when
        --whitelist_chrome_crashes is set, are not reported.

        @param status_raw: The contents of the status.log or status file from
                           the test.

        @return a list of crash entries to be reported.

        """
        crashes = []
        regex = re.compile(
            r'Received crash notification for ([-\w]+).+ (sig \d+)')
        chrome_regex = re.compile(r'^supplied_[cC]hrome|^chrome$')
        for match in regex.finditer(status_raw):
            w = _CRASH_WHITELIST.get(match.group(1))
            if (self._whitelist_chrome_crashes and
                    chrome_regex.match(match.group(1))):
                print '@@@STEP_WARNINGS@@@'
                print '%s crashed with %s' % (match.group(1), match.group(2))
            elif (w is not None and match.group(2) in w.signals and
                  w.deadline > datetime.datetime.now()):
                print 'Ignoring crash in %s for waiver that expires %s' % (
                    match.group(1), w.deadline.strftime('%Y-%b-%d'))
            else:
                crashes.append('%s %s' % match.groups())
        return crashes

    def _CollectInfo(self, testdir, custom_info):
        """Parses *_info files under testdir/sysinfo/var/log.

        If the sysinfo/var/log/*info files exist, save information that shows
        hw, ec and bios version info.

        This collection of extra info is disabled by default (this function is
        a no-op). It is enabled only if the --info command-line option is
        explicitly supplied. Normal job parsing does not supply this option.

        @param testdir: The autoserv test result directory.
        @param custom_info: Dictionary to collect detailed ec/bios info.

        @return a dictionary of info that was discovered.
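                For example (values are illustrative, echoing the sample raw
                text shown in the function body, plus whatever was passed in
                via custom_info), the returned dict might look like:
                    {'fw_version': 'snow_v1.1.332-cf20b3e',
                     'fwid': 'Google_Snow.2711.0.2012_08_06_1139',
                     'hwid': 'DAISY TEST A-A 9382'}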
196 197 """ 198 if not self._collect_info: 199 return {} 200 info = custom_info 201 202 sysinfo_dir = os.path.join(testdir, 'sysinfo', 'var', 'log') 203 for info_file, info_keys in {'ec_info.txt': ['fw_version'], 204 'bios_info.txt': ['fwid', 205 'hwid']}.iteritems(): 206 info_file_path = os.path.join(sysinfo_dir, info_file) 207 if not os.path.isfile(info_file_path): 208 continue 209 # Some example raw text that might be matched include: 210 # 211 # fw_version | snow_v1.1.332-cf20b3e 212 # fwid = Google_Snow.2711.0.2012_08_06_1139 # Active firmware ID 213 # hwid = DAISY TEST A-A 9382 # Hardware ID 214 info_regex = re.compile(r'^(%s)\s*[|=]\s*(.*)' % 215 '|'.join(info_keys)) 216 with open(info_file_path, 'r') as f: 217 for line in f: 218 line = line.strip() 219 line = line.split('#')[0] 220 match = info_regex.match(line) 221 if match: 222 info[match.group(1)] = str(match.group(2)).strip() 223 return info 224 225 def _CollectEndTimes(self, status_raw, status_re='', is_end=True): 226 """Helper to match and collect timestamp and localtime. 227 228 Preferred to locate timestamp and localtime with an 229 'END GOOD test_name...' line. However, aborted tests occasionally fail 230 to produce this line and then need to scrape timestamps from the 'START 231 test_name...' line. 232 233 @param status_raw: multi-line text to search. 234 @param status_re: status regex to seek (e.g. GOOD|FAIL) 235 @param is_end: if True, search for 'END' otherwise 'START'. 236 237 @return Tuple of timestamp, localtime retrieved from the test status 238 log. 239 240 """ 241 timestamp = '' 242 localtime = '' 243 244 localtime_re = r'\w+\s+\w+\s+[:\w]+' 245 match_filter = ( 246 r'^\s*%s\s+(?:%s).*timestamp=(\d*).*localtime=(%s).*$' % ( 247 'END' if is_end else 'START', status_re, localtime_re)) 248 matches = re.findall(match_filter, status_raw, re.MULTILINE) 249 if matches: 250 # There may be multiple lines with timestamp/localtime info. 251 # The last one found is selected because it will reflect the end 252 # time. 253 for i in xrange(len(matches)): 254 timestamp_, localtime_ = matches[-(i+1)] 255 if not timestamp or timestamp_ > timestamp: 256 timestamp = timestamp_ 257 localtime = localtime_ 258 return timestamp, localtime 259 260 def _CheckExperimental(self, testdir): 261 """Parses keyval file and return the value of `experimental`. 262 263 @param testdir: The result directory that has the keyval file. 264 265 @return The value of 'experimental', which is a boolean value indicating 266 whether it is an experimental test or not. 267 268 """ 269 keyval_file = os.path.join(testdir, 'keyval') 270 if not os.path.isfile(keyval_file): 271 return False 272 273 with open(keyval_file) as f: 274 for line in f: 275 match = re.match(r'experimental=(.+)', line) 276 if match: 277 return match.group(1) == 'True' 278 else: 279 return False 280 281 282 def _CollectResult(self, testdir, results, is_experimental=False): 283 """Collects results stored under testdir into a dictionary. 284 285 The presence/location of status files (status.log, status and 286 job_report.html) varies depending on whether the job is a simple 287 client test, simple server test, old-style suite or new-style 288 suite. For example: 289 -In some cases a single job_report.html may exist but many times 290 multiple instances are produced in a result tree. 291 -Most tests will produce a status.log but client tests invoked 292 by a server test will only emit a status file. 

        The two common criteria that seem to define the presence of a
        valid test result are:
        1. Existence of a 'status.log' or 'status' file. Note that if both a
           'status.log' and 'status' file exist for a test, the 'status' file
           is always a subset of the 'status.log' file contents.
        2. Presence of a 'debug' directory.

        In some cases multiple 'status.log' files will exist where the parent
        'status.log' contains the contents of multiple subdirectory
        'status.log' files. Parent and subdirectory 'status.log' files are
        always expected to agree on the outcome of a given test.

        The test results discovered from the 'status*' files are included
        in the result dictionary. The test directory name and a test directory
        timestamp/localtime are saved to be used as sort keys for the results.

        The value of 'is_experimental' is included in the result dictionary.

        @param testdir: The autoserv test result directory.
        @param results: A list to which a populated test-result dictionary
                        will be appended if a status file is found.
        @param is_experimental: A boolean value indicating whether the result
                                directory is for an experimental test.

        """
        status_file = os.path.join(testdir, 'status.log')
        if not os.path.isfile(status_file):
            status_file = os.path.join(testdir, 'status')
        if not os.path.isfile(status_file):
            return

        # Status is True if GOOD, else False for all others.
        status = False
        error_msg = None
        status_raw = open(status_file, 'r').read()
        failure_tags = 'ABORT|ERROR|FAIL'
        warning_tag = 'WARN|TEST_NA'
        failure = re.search(r'%s' % failure_tags, status_raw)
        warning = re.search(r'%s' % warning_tag, status_raw) and not failure
        good = (re.search(r'GOOD.+completed successfully', status_raw) and
                not (failure or warning))

        # We'd like warnings to allow the tests to pass, but still gather info.
        if good or warning:
            status = True

        if not good:
            match = re.search(r'^\t+(%s|%s)\t(.+)' % (failure_tags,
                                                      warning_tag),
                              status_raw, re.MULTILINE)
            if match:
                failure_type = match.group(1)
                reason = match.group(2).split('\t')[4]
                if self._escape_error:
                    reason = re.escape(reason)
                error_msg = ': '.join([failure_type, reason])

        # Grab the timestamp - can be used for sorting the test runs.
        # Grab the localtime - may be printed to enable line filtering by date.
        # Designed to match a line like this:
        # END GOOD testname ... timestamp=1347324321 localtime=Sep 10 17:45:21
        status_re = r'GOOD|%s|%s' % (failure_tags, warning_tag)
        timestamp, localtime = self._CollectEndTimes(status_raw, status_re)
        # Hung tests will occasionally skip printing the END line, so grab
        # a default timestamp from the START line in those cases.
        if not timestamp:
            timestamp, localtime = self._CollectEndTimes(status_raw,
                                                         is_end=False)

        results.append({
            'testdir': testdir,
            'crashes': self._CollectCrashes(status_raw),
            'status': status,
            'error_msg': error_msg,
            'localtime': localtime,
            'timestamp': timestamp,
            'perf': self._CollectPerf(testdir),
            'attr': self._CollectAttr(testdir),
            'info': self._CollectInfo(testdir, {'localtime': localtime,
                                                'timestamp': timestamp}),
            'experimental': is_experimental})

    def RecursivelyCollectResults(self, resdir, parent_experimental_tag=False):
        """Recursively collect results into a list of dictionaries.
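
        Each dictionary in the returned list is populated by _CollectResult
        and carries keys such as 'testdir', 'status', 'error_msg', 'crashes',
        'localtime', 'timestamp', 'perf', 'attr', 'info' and 'experimental'.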

        Only recurses into directories that possess a 'debug' subdirectory,
        because anything else is not considered a 'test' directory.

        The value of 'experimental' in the keyval file is used to determine
        whether the result is for an experimental test. If it is, all of its
        subdirectories are considered to be experimental tests too.

        @param resdir: results/test directory to parse results from and
                       recurse into.
        @param parent_experimental_tag: A boolean value, used to keep track of
               whether its parent directory is for an experimental test.

        @return List of dictionaries of results.

        """
        results = []
        is_experimental = (parent_experimental_tag or
                           self._CheckExperimental(resdir))
        self._CollectResult(resdir, results, is_experimental)
        for testdir in glob.glob(os.path.join(resdir, '*')):
            # Remove false positives that are missing a debug dir.
            if not os.path.exists(os.path.join(testdir, 'debug')):
                continue

            results.extend(self.RecursivelyCollectResults(
                testdir, is_experimental))
        return results


class ReportGenerator(object):
    """Collects and displays data from autoserv results directories.

    This class collects status and performance data from one or more autoserv
    result directories and generates test reports.
    """

    _KEYVAL_INDENT = 2
    _STATUS_STRINGS = {'hr': {'pass': '[ PASSED ]', 'fail': '[ FAILED ]'},
                       'csv': {'pass': 'PASS', 'fail': 'FAIL'}}

    def __init__(self, options, args):
        self._options = options
        self._args = args
        self._color = terminal.Color(options.color)
        self._results = []

    def _CollectAllResults(self):
        """Parses results into the self._results list.

        Builds a list (self._results) where each entry is a dictionary of
        result data from one test (which may contain other tests). Each
        dictionary will contain values such as: test folder, status,
        localtime, crashes, error_msg, perf keyvals [optional], info
        [optional].

        """
        collector = ResultCollector(
            collect_perf=self._options.perf,
            collect_attr=self._options.attr,
            collect_info=self._options.info,
            escape_error=self._options.escape_error,
            whitelist_chrome_crashes=self._options.whitelist_chrome_crashes)

        for resdir in self._args:
            if not os.path.isdir(resdir):
                Die('%r does not exist', resdir)
            self._results.extend(collector.RecursivelyCollectResults(resdir))

        if not self._results:
            Die('no test directories found')

    def _GenStatusString(self, status):
        """Given a bool indicating success or failure, return the right string.

        Also takes --csv into account and returns old-style strings if it is
        set.

        @param status: True or False, indicating success or failure.

        @return The appropriate string for printing.

        """
        success = 'pass' if status else 'fail'
        if self._options.csv:
            return self._STATUS_STRINGS['csv'][success]
        return self._STATUS_STRINGS['hr'][success]

    def _Indent(self, msg):
        """Given a message, indents it appropriately.

        @param msg: string to indent.
        @return indented version of msg.

        """
        return ' ' * self._KEYVAL_INDENT + msg

    def _GetTestColumnWidth(self):
        """Returns the test column width based on the test data.

        The test results are aligned by discovering the longest test
        directory name or perf key stored in the list of result dictionaries.

        @return The width for the test column.
480 481 """ 482 width = 0 483 for result in self._results: 484 width = max(width, len(result['testdir'])) 485 perf = result.get('perf') 486 if perf: 487 perf_key_width = len(max(perf, key=len)) 488 width = max(width, perf_key_width + self._KEYVAL_INDENT) 489 return width 490 491 def _PrintDashLine(self, width): 492 """Prints a line of dashes as a separator in output. 493 494 @param width: an integer. 495 """ 496 if not self._options.csv: 497 print ''.ljust(width + len(self._STATUS_STRINGS['hr']['pass']), '-') 498 499 def _PrintEntries(self, entries): 500 """Prints a list of strings, delimited based on --csv flag. 501 502 @param entries: a list of strings, entities to output. 503 504 """ 505 delimiter = ',' if self._options.csv else ' ' 506 print delimiter.join(entries) 507 508 def _PrintErrors(self, test, error_msg): 509 """Prints an indented error message, unless the --csv flag is set. 510 511 @param test: the name of a test with which to prefix the line. 512 @param error_msg: a message to print. None is allowed, but ignored. 513 514 """ 515 if not self._options.csv and error_msg: 516 self._PrintEntries([test, self._Indent(error_msg)]) 517 518 def _PrintErrorLogs(self, test, test_string): 519 """Prints the error log for |test| if --debug is set. 520 521 @param test: the name of a test suitable for embedding in a path 522 @param test_string: the name of a test with which to prefix the line. 523 524 """ 525 if self._options.print_debug: 526 debug_file_regex = os.path.join( 527 'results.', test, 'debug', 528 '%s*.ERROR' % os.path.basename(test)) 529 for path in glob.glob(debug_file_regex): 530 try: 531 with open(path) as fh: 532 for line in fh: 533 # Ensure line is not just WS. 534 if len(line.lstrip()) <= 0: 535 continue 536 self._PrintEntries( 537 [test_string, self._Indent(line.rstrip())]) 538 except IOError: 539 print 'Could not open %s' % path 540 541 def _PrintResultDictKeyVals(self, test_entry, result_dict): 542 """Formatted print a dict of keyvals like 'perf' or 'info'. 543 544 This function emits each keyval on a single line for uncompressed 545 review. The 'perf' dictionary contains performance keyvals while the 546 'info' dictionary contains ec info, bios info and some test timestamps. 547 548 @param test_entry: The unique name of the test (dir) - matches other 549 test output. 550 @param result_dict: A dict of keyvals to be presented. 551 552 """ 553 if not result_dict: 554 return 555 dict_keys = result_dict.keys() 556 dict_keys.sort() 557 width = self._GetTestColumnWidth() 558 for dict_key in dict_keys: 559 if self._options.csv: 560 key_entry = dict_key 561 else: 562 key_entry = dict_key.ljust(width - self._KEYVAL_INDENT) 563 key_entry = key_entry.rjust(width) 564 value_entry = self._color.Color( 565 self._color.BOLD, result_dict[dict_key]) 566 self._PrintEntries([test_entry, key_entry, value_entry]) 567 568 def _GetSortedTests(self): 569 """Sort the test result dicts in preparation for results printing. 570 571 By default sorts the results directionaries by their test names. 572 However, when running long suites, it is useful to see if an early test 573 has wedged the system and caused the remaining tests to abort/fail. The 574 datetime-based chronological sorting allows this view. 575 576 Uses the --sort-chron command line option to control. 577 578 """ 579 if self._options.sort_chron: 580 # Need to reverse sort the test dirs to ensure the suite folder 581 # shows at the bottom. 
            # Because the suite folder shares its datetime with the last test,
            # it would show second-to-last without the reverse sort first.
            tests = sorted(self._results, key=operator.itemgetter('testdir'),
                           reverse=True)
            tests = sorted(tests, key=operator.itemgetter('timestamp'))
        else:
            tests = sorted(self._results, key=operator.itemgetter('testdir'))
        return tests

    def _GenerateReportText(self):
        """Prints a result report to stdout.

        Prints a result table to stdout. Each row of the table contains the
        test result directory and the test result (PASS, FAIL). If the perf
        option is enabled, each test entry is followed by perf keyval entries
        from the test results.

        """
        tests = self._GetSortedTests()
        width = self._GetTestColumnWidth()

        crashes = {}
        tests_pass = 0
        self._PrintDashLine(width)

        for result in tests:
            testdir = result['testdir']
            test_entry = testdir if self._options.csv else testdir.ljust(width)

            status_entry = self._GenStatusString(result['status'])
            if result['status']:
                color = self._color.GREEN
                tests_pass += 1
            else:
                color = self._color.RED

            test_entries = [test_entry, self._color.Color(color, status_entry)]

            info = result.get('info', {})
            info.update(result.get('attr', {}))
            if self._options.csv and (self._options.info or
                                      self._options.attr):
                if info:
                    test_entries.extend(['%s=%s' % (k, info[k])
                                         for k in sorted(info.keys())])
                if not result['status'] and result['error_msg']:
                    test_entries.append('reason="%s"' % result['error_msg'])

            self._PrintEntries(test_entries)
            self._PrintErrors(test_entry, result['error_msg'])

            # Print out the error log for failed tests.
            if not result['status']:
                self._PrintErrorLogs(testdir, test_entry)

            # Emit the perf keyvals entries. There will be no entries if the
            # --no-perf option is specified.
            self._PrintResultDictKeyVals(test_entry, result['perf'])

            # Determine whether there was a crash during this test.
            if result['crashes']:
                for crash in result['crashes']:
                    if crash not in crashes:
                        crashes[crash] = set([])
                    crashes[crash].add(testdir)

            # Emit extra test metadata info on separate lines if not --csv.
            if not self._options.csv:
                self._PrintResultDictKeyVals(test_entry, info)

        self._PrintDashLine(width)

        if not self._options.csv:
            total_tests = len(tests)
            percent_pass = 100 * tests_pass / total_tests
            pass_str = '%d/%d (%d%%)' % (tests_pass, total_tests, percent_pass)
            print 'Total PASS: ' + self._color.Color(self._color.BOLD, pass_str)

        if self._options.crash_detection:
            print ''
            if crashes:
                print self._color.Color(self._color.RED,
                                        'Crashes detected during testing:')
                self._PrintDashLine(width)

                for crash_name, crashed_tests in sorted(crashes.iteritems()):
                    print self._color.Color(self._color.RED, crash_name)
                    for crashed_test in crashed_tests:
                        print self._Indent(crashed_test)

                self._PrintDashLine(width)
                print ('Total unique crashes: ' +
                       self._color.Color(self._color.BOLD, str(len(crashes))))

        # Sometimes the builders exit before these buffers are flushed.
        sys.stderr.flush()
        sys.stdout.flush()

    def Run(self):
        """Runs report generation."""
        self._CollectAllResults()
        self._GenerateReportText()
        for d in self._results:
            if d['experimental'] and self._options.ignore_experimental_tests:
                continue
            if not d['status'] or (
                    self._options.crash_detection and d['crashes']):
                sys.exit(1)


def main():
    usage = 'Usage: %prog [options] result-directories...'
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('--color', dest='color', action='store_true',
                      default=_STDOUT_IS_TTY,
                      help='Use color for text reports [default if TTY stdout]')
    parser.add_option('--no-color', dest='color', action='store_false',
                      help='Don\'t use color for text reports')
    parser.add_option('--no-crash-detection', dest='crash_detection',
                      action='store_false', default=True,
                      help='Don\'t report crashes or error out when detected')
    parser.add_option('--csv', dest='csv', action='store_true',
                      help='Output test result in CSV format. '
                           'Implies --no-debug --no-crash-detection.')
    parser.add_option('--info', dest='info', action='store_true',
                      default=False,
                      help='Include info keyvals in the report')
    parser.add_option('--escape-error', dest='escape_error',
                      action='store_true', default=False,
                      help='Escape error message text for tools.')
    parser.add_option('--perf', dest='perf', action='store_true',
                      default=True,
                      help='Include perf keyvals in the report [default]')
    parser.add_option('--attr', dest='attr', action='store_true',
                      default=False,
                      help='Include attr keyvals in the report')
    parser.add_option('--no-perf', dest='perf', action='store_false',
                      help='Don\'t include perf keyvals in the report')
    parser.add_option('--sort-chron', dest='sort_chron', action='store_true',
                      default=False,
                      help='Sort results by datetime instead of by test name.')
    parser.add_option('--no-debug', dest='print_debug', action='store_false',
                      default=True,
                      help='Don\'t print out logs when tests fail.')
    parser.add_option('--whitelist_chrome_crashes',
                      dest='whitelist_chrome_crashes',
                      action='store_true', default=False,
                      help='Treat Chrome crashes as non-fatal.')
    parser.add_option('--ignore_experimental_tests',
                      dest='ignore_experimental_tests',
                      action='store_true', default=False,
                      help='If set, experimental test results will not '
                           'influence the exit code.')

    (options, args) = parser.parse_args()

    if not args:
        parser.print_help()
        Die('no result directories provided')

    if options.csv and (options.print_debug or options.crash_detection):
        logging.warning('Forcing --no-debug --no-crash-detection')
        options.print_debug = False
        options.crash_detection = False

    generator = ReportGenerator(options, args)
    generator.Run()


if __name__ == '__main__':
    main()
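
# Example invocation (the script name and result paths below are purely
# illustrative):
#   ./generate_test_report --perf --info /tmp/autoserv_results
#   ./generate_test_report --csv results-dir-1 results-dir-2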