#!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from webkitpy.layout_tests.models import test_expectations

from webkitpy.common.net import layouttestresults


TestExpectations = test_expectations.TestExpectations
TestExpectationParser = test_expectations.TestExpectationParser


class BuildBotPrinter(object):
    # This output is parsed by buildbots and must only be changed in coordination with buildbot scripts (see webkit.org's
    # Tools/BuildSlaveSupport/build.webkit.org-config/master.cfg: RunWebKitTests._parseNewRunWebKitTestsOutput
    # and chromium.org's buildbot/master.chromium/scripts/master/log_parser/webkit_test_command.py).
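    #
    # For orientation, output assembled from the format strings below looks
    # roughly like this (counts and description strings are illustrative):
    #
    #   => Results: 9990/10000 tests passed (99.9%)
    #
    #   => Tests to be fixed (10):
    #       6 text failures             (60.0%)
    #       4 crashes                   (40.0%)
    #
    #   Regressions: Unexpected text failures (6)
    #     fast/example/a.html [ Failure ]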

    def __init__(self, stream, debug_logging):
        self.stream = stream
        self.debug_logging = debug_logging

    def print_results(self, run_details):
        if self.debug_logging:
            self.print_run_results(run_details.initial_results)
        self.print_unexpected_results(run_details.summarized_full_results, run_details.enabled_pixel_tests_in_retry)

    def _print(self, msg):
        self.stream.write(msg + '\n')

    def print_run_results(self, run_results):
        failed = run_results.total_failures
        total = run_results.total
        passed = total - failed - run_results.remaining
        percent_passed = 0.0
        if total > 0:
            percent_passed = float(passed) * 100 / total

        self._print("=> Results: %d/%d tests passed (%.1f%%)" % (passed, total, percent_passed))
        self._print("")
        self._print_run_results_entry(run_results, test_expectations.NOW, "Tests to be fixed")

        self._print("")
        # FIXME: We should be skipping anything marked WONTFIX, so we shouldn't bother logging these stats.
        self._print_run_results_entry(run_results, test_expectations.WONTFIX,
            "Tests that will only be fixed if they crash (WONTFIX)")
        self._print("")

    def _print_run_results_entry(self, run_results, timeline, heading):
        total = len(run_results.tests_by_timeline[timeline])
        not_passing = (total -
            len(run_results.tests_by_expectation[test_expectations.PASS] &
                run_results.tests_by_timeline[timeline]))
        self._print("=> %s (%d):" % (heading, not_passing))

        for result in TestExpectations.EXPECTATION_DESCRIPTIONS.keys():
            if result in (test_expectations.PASS, test_expectations.SKIP):
                continue
            results = (run_results.tests_by_expectation[result] & run_results.tests_by_timeline[timeline])
            desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result]
            if not_passing and len(results):
                pct = len(results) * 100.0 / not_passing
                self._print("  %5d %-24s (%4.1f%%)" % (len(results), desc, pct))

    def print_unexpected_results(self, summarized_results, enabled_pixel_tests_in_retry=False):
        passes = {}
        flaky = {}
        regressions = {}

        def add_to_dict_of_lists(d, key, value):
            d.setdefault(key, []).append(value)

        def add_result(test, results, passes=passes, flaky=flaky, regressions=regressions):
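            # "results" is one test's entry from the summarized (JSON-style)
            # results; per the reads below it holds space-separated result
            # strings, e.g. (illustrative values):
            #   {'actual': 'TEXT PASS', 'expected': 'PASS', 'is_unexpected': True}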
            actual = results['actual'].split(" ")
            expected = results['expected'].split(" ")

            if not results.get('is_unexpected'):
                # Don't print anything for tests that ran as expected.
                return

            if actual == ['PASS']:
                if 'CRASH' in expected:
                    add_to_dict_of_lists(passes, 'Expected to crash, but passed', test)
                elif 'TIMEOUT' in expected:
                    add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test)
                else:
                    add_to_dict_of_lists(passes, 'Expected to fail, but passed', test)
            elif enabled_pixel_tests_in_retry and actual == ['TEXT', 'IMAGE+TEXT']:
                add_to_dict_of_lists(regressions, actual[0], test)
            elif len(actual) > 1:
                # We group flaky tests by the first actual result we got.
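                # (Illustrative: actual == ['TEXT', 'PASS'] files the test under 'TEXT'.)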
                add_to_dict_of_lists(flaky, actual[0], test)
            else:
                add_to_dict_of_lists(regressions, results['actual'], test)

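        # for_each_test walks the nested 'tests' trie and calls add_result with
        # each test's path and its leaf result dict.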
        layouttestresults.for_each_test(summarized_results['tests'], add_result)

        if len(passes) or len(flaky) or len(regressions):
            self._print("")
        if len(passes):
            for key, tests in passes.iteritems():
                self._print("%s: (%d)" % (key, len(tests)))
                tests.sort()
                for test in tests:
                    self._print("  %s" % test)
                self._print("")
            self._print("")

        if len(flaky):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in flaky.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print("Unexpected flakiness: %s (%d)" % (descriptions[result], len(tests)))
                tests.sort()

                for test in tests:
                    test_result = layouttestresults.result_for_test(summarized_results['tests'], test)
                    actual = test_result['actual'].split(" ")
                    expected = test_result['expected'].split(" ")
                    # FIXME: clean this up once the old syntax is gone
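                    # (Illustrative: old-style tokens such as PASS or CRASH come
                    # back out as the new-syntax forms "Pass" / "Crash" here.)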
                    new_expectations_list = [TestExpectationParser._inverted_expectation_tokens[exp] for exp in list(set(actual) | set(expected))]
                    self._print("  %s [ %s ]" % (test, " ".join(new_expectations_list)))
                self._print("")
            self._print("")

        if len(regressions):
            descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
            for key, tests in regressions.iteritems():
                result = TestExpectations.EXPECTATIONS[key.lower()]
                self._print("Regressions: Unexpected %s (%d)" % (descriptions[result], len(tests)))
                tests.sort()
                for test in tests:
                    self._print("  %s [ %s ]" % (test, TestExpectationParser._inverted_expectation_tokens[key]))
                self._print("")

        if len(summarized_results['tests']) and self.debug_logging:
            self._print("%s" % ("-" * 78))
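

if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: summarized results
    # normally come from webkitpy's layout-test runner, so the hand-built dict
    # below only assumes the leaf shape add_result reads above ('actual' and
    # 'expected' as space-separated strings, plus an optional 'is_unexpected').
    import sys

    example_summarized_results = {
        'tests': {
            'fast': {
                'example.html': {
                    'expected': 'PASS',
                    'actual': 'TIMEOUT',
                    'is_unexpected': True,
                },
            },
        },
    }
    printer = BuildBotPrinter(sys.stdout, debug_logging=False)
    printer.print_unexpected_results(example_summarized_results)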