# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""
The Manager runs a series of tests (TestType interface) against a set
of test files.  If a test file fails a TestType, it returns a list of TestFailure
objects to the Manager. The Manager then aggregates the TestFailures to
create a final report.
"""

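# A minimal sketch of how this class is typically driven (for illustration
# only; the real entry point is webkitpy's run_webkit_tests script, and the
# Host/Port/Printer construction shown here is an assumption, not part of
# this module):
#
#   host = Host()
#   port = host.port_factory.get(options.platform, options)
#   manager = Manager(port, options, printer)
#   run_details = manager.run(args)
#   sys.exit(run_details.exit_code)
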
import datetime
import json
import logging
import random
import sys
import time

from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_input import TestInput

_log = logging.getLogger(__name__)

# Builder base URL where we have the archived test results.
BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"

TestExpectations = test_expectations.TestExpectations


class Manager(object):
    """A class that manages running a series of tests across a set of layout
    test files."""

    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific functionality.
          options: the parsed command-line options.
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None

        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.PERF_SUBDIR = 'perf'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
        self._http_server_started = False
        self._websockets_server_started = False

        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)

    def _collect_tests(self, args):
        return self._finder.find_tests(self._options, args)

    def _is_http_test(self, test):
        return self.HTTP_SUBDIR in test or self._is_websocket_test(test)

    def _is_websocket_test(self, test):
        return self.WEBSOCKET_SUBDIR in test

    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

    def _is_perf_test(self, test):
        return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test

    def _prepare_lists(self, paths, test_names):
        tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
        tests_to_run = [test for test in test_names if test not in tests_to_skip]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)
        elif self._options.order == 'random-seeded':
            rnd = random.Random()
            rnd.seed(4)  # Fixed seed so "random-seeded" order is reproducible (http://xkcd.com/221/).
            rnd.shuffle(tests_to_run)

        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
        self._expectations.add_extra_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)

        return tests_to_run, tests_to_skip

    def _test_input_for_file(self, test_file):
        return TestInput(test_file,
            self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
            self._test_requires_lock(test_file),
            should_add_missing_baselines=(self._options.new_test_results and not self._test_is_expected_missing(test_file)))

    def _test_requires_lock(self, test_file):
        """Return True if the test needs to be locked when
        running multiple copies of NRWTs. Perf tests are locked
        because the heavy load caused by running other tests in parallel
        might cause some of them to time out."""
        return self._is_http_test(test_file) or self._is_perf_test(test_file)

    def _test_is_expected_missing(self, test_file):
        expectations = self._expectations.model().get_expectations(test_file)
        return test_expectations.MISSING in expectations or test_expectations.NEEDS_REBASELINE in expectations or test_expectations.NEEDS_MANUAL_REBASELINE in expectations

    def _test_is_slow(self, test_file):
        return test_expectations.SLOW in self._expectations.model().get_expectations(test_file)

    def needs_servers(self, test_names):
        return any(self._test_requires_lock(test_name) for test_name in test_names)

    def _set_up_run(self, test_names):
        self._printer.write_update("Checking build ...")
        if not self._port.check_build(self.needs_servers(test_names)):
            _log.error("Build check failed")
            return False

        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        if self._options.pixel_tests:
            self._printer.write_update("Starting pixel test helper ...")
            self._port.start_helper()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            if not self._port.check_sys_deps(self.needs_servers(test_names)):
                self._port.stop_helper()
                return False

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        self._port.setup_test_run()
        return True

    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
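        # High-level flow: collect the tests, parse the expectations, set up
        # the run, run the tests (optionally retrying unexpected failures),
        # then summarize the results, write/upload the JSON files, and report.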
        start_time = time.time()
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        if not self._set_up_run(tests_to_run):
            return test_run_results.RunDetails(exit_code=-1)

        # Don't retry failures if an explicit list of tests was passed in: if
        # the number of paths on the command line equals the number of tests
        # found, each path named a single test; if any path expanded to more
        # than one test, retry unexpected failures by default.
        if self._options.retry_failures is None:
            should_retry_failures = len(paths) < len(test_names)
        else:
            should_retry_failures = self._options.retry_failures

        enabled_pixel_tests_in_retry = False
        try:
            self._start_servers(tests_to_run)

            initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
                int(self._options.child_processes), retrying=False)

            tests_to_retry = self._tests_to_retry(initial_results)
            if should_retry_failures and tests_to_retry and not initial_results.interrupted:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()

                _log.info('')
                _log.info("Retrying %d unexpected failure(s) ..." % len(tests_to_retry))
                _log.info('')
                retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1,
                    num_workers=1, retrying=True)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._stop_servers()
            self._clean_up_run()

        # Some crash logs can take a long time to be written out so look
        # for new logs after the test run finishes.
        _log.debug("looking for new crash logs")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("summarizing results")
        summarized_full_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
        summarized_failing_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, only_include_failing=True)

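        # The process exit code is the number of unexpected regressions; zero
        # means nothing new failed.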
        exit_code = summarized_failing_results['num_regressions']
        if not self._options.dry_run:
            self._write_json_files(summarized_full_results, summarized_failing_results, initial_results)
            self._upload_json_files()

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if self._options.show_results and (exit_code or (self._options.full_results_html and initial_results.total_failures)):
                self._port.show_results_html_file(results_path)

        self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results)
        return test_run_results.RunDetails(exit_code, summarized_full_results, summarized_failing_results, initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying):

        test_inputs = []
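        # As the loops below imply, --iterations repeats the entire test list
        # N times, while --repeat-each queues N back-to-back copies of each
        # individual test.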
        for _ in xrange(iterations):
            for test in tests_to_run:
                for _ in xrange(repeat_each):
                    test_inputs.append(self._test_input_for_file(test))
        return self._runner.run_tests(self._expectations, test_inputs, tests_to_skip, num_workers, retrying)

    def _start_servers(self, tests_to_run):
        if self._port.requires_http_server() or any(self._is_http_test(test) for test in tests_to_run):
            self._printer.write_update('Starting HTTP server ...')
            self._port.start_http_server(number_of_servers=(2 * self._options.max_locked_shards))
            self._http_server_started = True

        if any(self._is_websocket_test(test) for test in tests_to_run):
            self._printer.write_update('Starting WebSocket server ...')
            self._port.start_websocket_server()
            self._websockets_server_started = True

    def _stop_servers(self):
        if self._http_server_started:
            self._printer.write_update('Stopping HTTP server ...')
            self._http_server_started = False
            self._port.stop_http_server()
        if self._websockets_server_started:
            self._printer.write_update('Stopping WebSocket server ...')
            self._websockets_server_started = False
            self._port.stop_websocket_server()

    def _clean_up_run(self):
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()

    def _force_pixel_tests_if_needed(self):
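        # If pixel tests were disabled for the initial pass, enable them for
        # the retry pass (stopping and restarting the helper around the
        # change) and return True so the caller can turn them off again.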
        if self._options.pixel_tests:
            return False

        _log.debug("Restarting helper")
        self._port.stop_helper()
        self._options.pixel_tests = True
        self._port.start_helper()

        return True

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress, do a second pass at the end of the test run.

           run_results: the results of the test run
           start_time: time the tests started at. We're looking for crash
               logs written after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.iteritems():
            if result.type != test_expectations.CRASH:
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append([test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
        if sample_files:
            for test, sample_file in sample_files.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

    def _clobber_old_results(self):
        # Just clobber the actual test results directories since the other
        # files in the results directory are explicitly used for cross-run
        # tracking.
        self._printer.write_update("Clobbering old results in %s" %
                                   self._results_directory)
        layout_tests_dir = self._port.layout_tests_dir()
        possible_dirs = self._port.test_dirs()
        for dirname in possible_dirs:
            if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
                self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))

    def _tests_to_retry(self, run_results):
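        # Only unexpectedly failing results are retried; unexpected passes are
        # left alone.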
        return [result.test_name for result in run_results.unexpected_results_by_name.values() if result.type != test_expectations.PASS]

    def _write_json_files(self, summarized_full_results, summarized_failing_results, initial_results):
        _log.debug("Writing JSON files in %s." % self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(self._port, initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie, times_json_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory, "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
        json_results_generator.write_json(self._filesystem, summarized_full_results, full_results_path)

        full_results_path = self._filesystem.join(self._results_directory, "failing_results.json")
        # We write failing_results.json out as jsonp because we need to load it from a file url for results.html and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem, summarized_failing_results, full_results_path, callback="ADD_RESULTS")

        _log.debug("Finished writing JSON files.")

    def _upload_json_files(self):
        if not self._options.test_results_server:
            return

        if not self._options.master_name:
            _log.error("--test-results-server was set, but --master-name was not.  Not uploading JSON files.")
            return

        _log.debug("Uploading JSON files for builder: %s", self._options.builder_name)
        attrs = [("builder", self._options.builder_name),
                 ("testtype", "layout-tests"),
                 ("master", self._options.master_name)]

        files = [(file, self._filesystem.join(self._results_directory, file)) for file in ["failing_results.json", "full_results.json", "times_ms.json"]]

        url = "http://%s/testfile/upload" % self._options.test_results_server
        # Set uploading timeout in case appengine server is having problems.
        # 120 seconds are more than enough to upload test results.
        uploader = FileUploader(url, 120)
        try:
            response = uploader.upload_as_multipart_form_data(self._filesystem, files, attrs)
            if response:
                if response.code == 200:
                    _log.debug("JSON uploaded.")
                else:
                    _log.debug("JSON upload failed, %d: '%s'" % (response.code, response.read()))
            else:
                _log.error("JSON upload failed; no response returned")
        except Exception as err:
            _log.error("Upload failed: %s" % err)

    def _copy_results_html_file(self, destination_path):
        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
        results_file = self._filesystem.join(base_dir, 'results.html')
        # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
        # so make sure it exists before we try to copy it.
        if self._filesystem.exists(results_file):
            self._filesystem.copyfile(results_file, destination_path)

    def _stats_trie(self, initial_results):
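        # Worker names are expected to look like "worker/3"; results with no
        # worker name get a worker number of -1.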
        def _worker_number(worker_name):
            return int(worker_name.split('/')[1]) if worker_name else -1

        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != test_expectations.SKIP:
                stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
        stats_trie = {}
        for name, value in stats.iteritems():
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie
    425