# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import Queue
import StringIO
import codecs
import json
import logging
import os
import platform
import re
import sys
import thread
import threading
import time
import unittest

from webkitpy.common.system import outputcapture, path
from webkitpy.common.system.crashlogs_unittest import make_mock_crash_report_darwin
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.host import Host
from webkitpy.common.host_mock import MockHost

from webkitpy.layout_tests import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.port import Port
from webkitpy.layout_tests.port import test
from webkitpy.test.skip import skip_if
from webkitpy.tool import grammar
from webkitpy.tool.mocktool import MockOptions


def parse_args(extra_args=None, tests_included=False, new_results=False, print_nothing=True):
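    """Build and parse an argument list for run-webkit-tests.

    Defaults to the 'test' platform and one child process unless extra_args
    overrides them, and appends a default set of test paths (including a
    glob) unless tests_included is true.
    """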
    extra_args = extra_args or []
    args = []
    if '--platform' not in extra_args:
        args.extend(['--platform', 'test'])
    if not new_results:
        args.append('--no-new-test-results')

    if '--child-processes' not in extra_args:
        args.extend(['--child-processes', '1'])
    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])
    return run_webkit_tests.parse_args(args)


def passing_run(extra_args=None, port_obj=None, tests_included=False, host=None, shared_port=True):
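    """Run run-webkit-tests and return True if it exited cleanly (exit code 0)."""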
    options, parsed_args = parse_args(extra_args, tests_included)
    if not port_obj:
        host = host or MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj

    logging_stream = StringIO.StringIO()
    run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    return run_details.exit_code == 0


def logging_run(extra_args=None, port_obj=None, tests_included=False, host=None, new_results=False, shared_port=True):
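    """Run run-webkit-tests and return (run_details, logging_stream, user) for inspection."""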
    options, parsed_args = parse_args(extra_args=extra_args,
                                      tests_included=tests_included,
                                      print_nothing=False, new_results=new_results)
    host = host or MockHost()
    if not port_obj:
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    run_details, output = run_and_capture(port_obj, options, parsed_args, shared_port)
    return (run_details, output, host.user)


def run_and_capture(port_obj, options, parsed_args, shared_port=True):
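    """Run run-webkit-tests with stdout/stderr captured; return (run_details, logging_stream)."""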
    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
    oc = outputcapture.OutputCapture()
    try:
        oc.capture_output()
        logging_stream = StringIO.StringIO()
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()
    return (run_details, logging_stream)


def get_tests_run(args, host=None, port_obj=None):
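    """Return the names of the tests that were actually run."""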
    results = get_test_results(args, host=host, port_obj=port_obj)
    return [result.test_name for result in results]


def get_test_batches(args, host=None):
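    """Group the names of the tests that were run into batches, one batch per worker pid."""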
    results = get_test_results(args, host)
    batches = []
    batch = []
    current_pid = None
    for result in results:
        if batch and result.pid != current_pid:
            batches.append(batch)
            batch = []
        batch.append(result.test_name)
        current_pid = result.pid
    if batch:
        batches.append(batch)
    return batches


def get_test_results(args, host=None, port_obj=None):
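    """Run the tests and return the TestResults from the initial run and any retries."""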
    options, parsed_args = parse_args(args, tests_included=True)

    host = host or MockHost()
    port_obj = port_obj or host.port_factory.get(port_name=options.platform, options=options)

    oc = outputcapture.OutputCapture()
    oc.capture_output()
    logging_stream = StringIO.StringIO()
    try:
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()

    all_results = []
    if run_details.initial_results:
        all_results.extend(run_details.initial_results.all_results)

    if run_details.retry_results:
        all_results.extend(run_details.retry_results.all_results)
    return all_results


def parse_full_results(full_results_text):
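    """Strip the ADD_RESULTS(...); JSONP wrapper and parse the enclosed JSON."""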
    json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
    compressed_results = json.loads(json_to_eval)
    return compressed_results


class StreamTestingMixin(object):
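    """Assertion helpers for tests that inspect a captured logging stream."""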
    def assertContains(self, stream, string):
        self.assertTrue(string in stream.getvalue())

    def assertEmpty(self, stream):
        self.assertFalse(stream.getvalue())

    def assertNotEmpty(self, stream):
        self.assertTrue(stream.getvalue())


class RunTest(unittest.TestCase, StreamTestingMixin):
    def setUp(self):
        # A real PlatformInfo object is used here instead of a
        # MockPlatformInfo because we need to actually check for
        # Windows and Mac to skip some tests.
        self._platform = SystemHost().platform

        # FIXME: Remove this when we fix test-webkitpy to work
        # properly on cygwin (bug 63846).
        self.should_test_processes = not self._platform.is_win()

    def test_basic(self):
        options, args = parse_args(tests_included=True)
        logging_stream = StringIO.StringIO()
        host = MockHost()
        port_obj = host.port_factory.get(options.platform, options)
        details = run_webkit_tests.run(port_obj, options, args, logging_stream)

        # These numbers will need to be updated whenever we add new tests.
        self.assertEqual(details.initial_results.total, test.TOTAL_TESTS)
        self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS)
        self.assertEqual(len(details.initial_results.unexpected_results_by_name), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
        self.assertEqual(details.retry_results.total, test.UNEXPECTED_FAILURES)

        expected_tests = details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name)
        expected_summary_str = ''
        if details.initial_results.expected_failures > 0:
            expected_summary_str = " (%d passed, %d didn't)" % (expected_tests - details.initial_results.expected_failures, details.initial_results.expected_failures)
        one_line_summary = "%d tests ran as expected%s, %d didn't:\n" % (
            expected_tests,
            expected_summary_str,
            len(details.initial_results.unexpected_results_by_name))
        self.assertTrue(one_line_summary in logging_stream.buflist)

        # Ensure the results were summarized properly.
        self.assertEqual(details.summarized_failing_results['num_regressions'], details.exit_code)

        # Ensure the results were written out and displayed.
        failing_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
        json_to_eval = failing_results_text.replace("ADD_RESULTS(", "").replace(");", "")
        self.assertEqual(json.loads(json_to_eval), details.summarized_failing_results)

        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertEqual(json.loads(full_results_text), details.summarized_full_results)

        self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_batch_size(self):
        batch_tests_run = get_test_batches(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_max_locked_shards(self):
        # Tests for the default of using one locked shard even in the case of more than one child process.
        if not self.should_test_processes:
            return
        save_env_webkit_test_max_locked_shards = None
        if "WEBKIT_TEST_MAX_LOCKED_SHARDS" in os.environ:
            save_env_webkit_test_max_locked_shards = os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
            del os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
        _, regular_output, _ = logging_run(['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
        try:
            self.assertTrue(any(['1 locked' in line for line in regular_output.buflist]))
        finally:
            if save_env_webkit_test_max_locked_shards:
                os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"] = save_env_webkit_test_max_locked_shards

    def test_child_processes_2(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
            self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))

    def test_child_processes_min(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2', '-i', 'passes/virtual_passes', 'passes'],
                tests_included=True, shared_port=False)
            self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))

    def test_dryrun(self):
        tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(tests_run, [])

        tests_run = get_tests_run(['-n'])
        self.assertEqual(tests_run, [])

    def test_enable_sanitizer(self):
        self.assertTrue(passing_run(['--enable-sanitizer', 'failures/expected/text.html']))

    def test_exception_raised(self):
        # Exceptions raised by a worker are treated differently depending on
        # whether the worker runs in-process or out. Inline (in-process)
        # exceptions propagate as normal, which lets us see the full stack
        # trace and traceback from the worker. The downside is that the error
        # could be anything, but that is actually useful in testing.
        #
        # Exceptions raised in a separate process are re-packaged into
        # WorkerExceptions (a subclass of BaseException), which carry a string
        # capture of the stack that can be printed, but which don't display
        # properly in the unit test exception handlers.
        self.assertRaises(BaseException, logging_run,
            ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)

        if self.should_test_processes:
            self.assertRaises(BaseException, logging_run,
                ['--child-processes', '2', '--skipped=ignore', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)

    def test_device_failure(self):
        # Test that we handle a device going offline during a test properly.
        details, regular_output, _ = logging_run(['failures/expected/device_failure.html'], tests_included=True)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue('worker/0 has failed' in regular_output.getvalue())

    def test_full_results_html(self):
        host = MockHost()
        details, _, _ = logging_run(['--full-results-html'], host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(host.user.opened_urls), 1)

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        details, _, _ = logging_run(['failures/expected/keyboard.html', '--child-processes', '1'], tests_included=True)
        self.assertEqual(details.exit_code, test_run_results.INTERRUPTED_EXIT_STATUS)

        if self.should_test_processes:
            _, regular_output, _ = logging_run(['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--skipped=ignore'], tests_included=True, shared_port=False)
            self.assertTrue(any(['Interrupted, exiting' in line for line in regular_output.buflist]))

    def test_no_tests_found(self):
        details, err, _ = logging_run(['resources'], tests_included=True)
        self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
        self.assertContains(err, 'No tests to run.\n')

    def test_no_tests_found_2(self):
        details, err, _ = logging_run(['foo'], tests_included=True)
        self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
        self.assertContains(err, 'No tests to run.\n')

    def test_no_tests_found_3(self):
        details, err, _ = logging_run(['--run-chunk', '5:400', 'foo/bar.html'], tests_included=True)
        self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
        self.assertContains(err, 'No tests to run.\n')

    def test_natural_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'], tests_run)

    def test_natural_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)

    def test_random_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(sorted(tests_to_run), sorted(tests_run))

    def test_random_daily_seed_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random-seeded'] + tests_to_run)
        self.assertEqual(sorted(tests_to_run), sorted(tests_run))

    def test_random_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(tests_run.count('passes/audio.html'), 2)
        self.assertEqual(tests_run.count('passes/args.html'), 2)

    def test_no_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_with_directory_entries_in_natural_order(self):
        tests_to_run = ['http/tests/ssl', 'perf/foo', 'http/tests/passes']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'perf/foo/test.html', 'http/tests/passes/image.html', 'http/tests/passes/text.html'])

    def test_repeat_each(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])

    def test_ignore_flag(self):
        # Note that passes/image.html is expected to be run since we specified it directly.
        tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'])
        self.assertFalse('passes/text.html' in tests_run)
        self.assertTrue('passes/image.html' in tests_run)

    def test_skipped_flag(self):
        tests_run = get_tests_run(['passes'])
        self.assertFalse('passes/skipped/skip.html' in tests_run)
        num_tests_run_by_default = len(tests_run)

        # Check that nothing changes when we specify skipped=default.
        self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
                          num_tests_run_by_default)

        # Now check that we run one more test (the skipped one).
        tests_run = get_tests_run(['--skipped=ignore', 'passes'])
        self.assertTrue('passes/skipped/skip.html' in tests_run)
        self.assertEqual(len(tests_run), num_tests_run_by_default + 1)

        # Now check that we only run the skipped test.
        self.assertEqual(get_tests_run(['--skipped=only', 'passes']), ['passes/skipped/skip.html'])

        # Now check that we don't run anything.
        self.assertEqual(get_tests_run(['--skipped=always', 'passes/skipped/skip.html']), [])

    def test_iterations(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--iterations', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

    def test_repeat_each_iterations_num_tests(self):
        # The total number of tests should be: number_of_tests *
        # repeat_each * iterations (here, 2 * 4 * 2 = 16).
        host = MockHost()
        _, err, _ = logging_run(
            ['--iterations', '2', '--repeat-each', '4', '--debug-rwt-logging', 'passes/text.html', 'failures/expected/text.html'],
            tests_included=True, host=host)
        self.assertContains(err, "All 16 tests ran as expected (8 passed, 8 didn't).\n")

    def test_run_chunk(self):
        # Test that we actually select the right chunk
        all_tests_run = get_tests_run(['passes', 'failures'])
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4', 'passes', 'failures'])
        self.assertEqual(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run)
        self.assertEqual(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_part(self):
        # Test that we actually select the right part
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_test_batches(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEqual(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_skip_failing_tests(self):
        # This tests that we skip both known failing and known flaky tests. Because there are
        # no known flaky tests in the default test_expectations, we add additional expectations.
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) passes/image.html [ ImageOnlyFailure Pass ]\n')

        batches = get_test_batches(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
        has_passes_text = False
        for batch in batches:
            self.assertFalse('failures/expected/text.html' in batch)
            self.assertFalse('passes/image.html' in batch)
            has_passes_text = has_passes_text or ('passes/text.html' in batch)
        self.assertTrue(has_passes_text)

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'])
        self.assertEqual(tests_run, ['passes/text.html'])

    def test_single_file_with_prefix(self):
        tests_run = get_tests_run(['LayoutTests/passes/text.html'])
        self.assertEqual(['passes/text.html'], tests_run)

    def test_single_skipped_file(self):
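        # No test matches this (misspelled) name, so nothing should be run.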
        tests_run = get_tests_run(['failures/expected/keybaord.html'])
        self.assertEqual([], tests_run)

    def test_stderr_is_saved(self):
        host = MockHost()
        self.assertTrue(passing_run(host=host))
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
                          'stuff going to stderr')

    def test_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)
        host.filesystem.remove(filename)
        details, err, user = logging_run(['--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
        self.assertNotEmpty(err)

    def test_test_list_with_prefix(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'LayoutTests/passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)

    def test_smoke_test(self):
        host = MockHost()
        smoke_test_filename = test.LAYOUT_TEST_DIR + '/SmokeTests'
        host.filesystem.write_text_file(smoke_test_filename, 'passes/text.html\n')

        # Test the default smoke testing.
        tests_run = get_tests_run(['--smoke'], host=host)
        self.assertEqual(['passes/text.html'], tests_run)

        # Test running the smoke tests plus some manually-specified tests.
        tests_run = get_tests_run(['--smoke', 'passes/image.html'], host=host)
        self.assertEqual(['passes/image.html', 'passes/text.html'], tests_run)

        # Test that --no-smoke runs only the manually-specified tests.
        tests_run = get_tests_run(['--no-smoke', 'passes/image.html'], host=host)
        self.assertEqual(['passes/image.html'], tests_run)

        # Test that we don't run just the smoke tests by default on a normal test port.
        tests_run = get_tests_run([], host=host)
        self.assertNotEqual(['passes/text.html'], tests_run)

        # Create a port that does run only the smoke tests by default, and verify that works as expected.
        port_obj = host.port_factory.get('test')
        port_obj.default_smoke_test_only = lambda: True
        tests_run = get_tests_run([], host=host, port_obj=port_obj)
        self.assertEqual(['passes/text.html'], tests_run)

        # Verify that --no-smoke continues to work on a smoke-by-default port.
        tests_run = get_tests_run(['--no-smoke'], host=host, port_obj=port_obj)
        self.assertNotEqual(['passes/text.html'], tests_run)

    def test_missing_and_unexpected_results(self):
        # Test that missing and unexpected results are reported correctly in
        # full_results.json.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results', '--retry-failures',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 2)
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find('"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","is_unexpected":true') != -1)
        self.assertTrue(json_string.find('"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_unexpected":true') != -1)
        self.assertTrue(json_string.find('"num_regressions":2') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)

    def test_different_failure_on_retry(self):
        # This tests that if a test fails two different ways -- both unexpected
        # -- we treat it as a failure rather than a flaky result.  We use the
        # initial failure for simplicity and consistency w/ the flakiness
        # dashboard, even if the second failure is worse.

        details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/text_then_crash.html'], tests_included=True)
        self.assertEqual(details.exit_code, 1)
        self.assertEqual(details.summarized_failing_results['tests']['failures']['unexpected']['text_then_crash.html']['actual'],
                         'TEXT CRASH')

        # If we get a test that fails two different ways -- but the second one is expected --
        # we should treat it as a flaky result and report the initial unexpected failure type
        # to the dashboard. However, the test should be considered passing.
        details, err, _ = logging_run(['--retry-failures', 'failures/expected/crash_then_text.html'], tests_included=True)
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(details.summarized_failing_results['tests']['failures']['expected']['crash_then_text.html']['actual'],
                         'CRASH FAIL')

    def test_pixel_test_directories(self):
        host = MockHost()

        # Both tests have failing checksums, but only the first is in a
        # --pixel-test-directory, so only it should fail.
        args = ['--pixel-tests', '--retry-failures', '--pixel-test-directory', 'failures/unexpected/pixeldir',
                'failures/unexpected/pixeldir/image_in_pixeldir.html',
                'failures/unexpected/image_not_in_pixeldir.html']
        details, err, _ = logging_run(extra_args=args, host=host, tests_included=True)

        self.assertEqual(details.exit_code, 1)
        expected_token = '"pixeldir":{"image_in_pixeldir.html":{"expected":"PASS","actual":"IMAGE","is_unexpected":true'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)

    def test_crash_with_stderr(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true,"is_unexpected":true') != -1)

    def test_no_image_failure_with_image_diff(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)

    def test_exit_after_n_failures_upload(self):
        host = MockHost()
        details, regular_output, user = logging_run(
           ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
           tests_included=True, host=host)

        # The incremental results were generated during the run and then
        # deleted at the end of it, so the file should no longer exist.
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))

        self.assertEqual(details.exit_code, test_run_results.EARLY_EXIT_STATUS)

        # This checks that passes/text.html is considered SKIPped.
        self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        # This checks that we told the user we bailed out.
        self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())

        # This checks that neither test ran as expected.
        # FIXME: This log message is confusing; tests that were skipped should be called out separately.
        self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/text.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        host = MockHost()
        with host.filesystem.mkdtemp() as tmpdir:
            _, _, user = logging_run(['--results-directory=' + str(tmpdir)], tests_included=True, host=host)
            self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, host.filesystem.join(tmpdir, 'results.html'))])

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        _, _, user = logging_run(tests_included=True)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        host = MockHost()
        host.filesystem.maybe_make_directory('/tmp/cwd')
        host.filesystem.chdir('/tmp/cwd')
        _, _, user = logging_run(['--results-directory=foo'], tests_included=True, host=host)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])

    def test_retrying_default_value(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertFalse('Retrying' in err.getvalue())

        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 7)  # FIXME: This should be a constant in test.py.
        self.assertTrue('Retrying' in err.getvalue())

    def test_retrying_default_value_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'failures/unexpected/text-image-checksum.html\nfailures/unexpected/crash.html')
        details, err, _ = logging_run(['--debug-rwt-logging', '--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 2)
        self.assertFalse('Retrying' in err.getvalue())

        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'failures')
        details, err, _ = logging_run(['--debug-rwt-logging', '--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 7)
        self.assertTrue('Retrying' in err.getvalue())

    def test_retrying_and_flaky_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', '--retry-failures', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))
        self.assertEqual(len(host.user.opened_urls), 0)

        # Now we test that --clobber-old-results does remove the old entries and the old retries,
        # and that we don't retry again.
        host = MockHost()
        details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Clobbering old results' in err.getvalue())
        self.assertTrue('flaky/text.html' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('retries'))
        self.assertEqual(len(host.user.opened_urls), 1)

    def test_retrying_crashed_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/crash.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Retrying' in err.getvalue())

    def test_retrying_leak_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/leak.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Retrying' in err.getvalue())

    def test_retrying_force_pixel_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--no-pixel-tests', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        results = parse_full_results(json_string)
        self.assertEqual(results["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
            {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "is_unexpected": True})
        self.assertFalse(results["pixel_tests_enabled"])
        self.assertEqual(details.enabled_pixel_tests_in_retry, True)

    def test_retrying_uses_retries_directory(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))

    def test_run_order__inline(self):
        # These next tests check that we run the tests in ascending alphabetical
        # order per directory. HTTP tests are sharded separately from other tests,
        # so we have to test both.
        tests_run = get_tests_run(['-i', 'passes/virtual_passes', 'passes'])
        self.assertEqual(tests_run, sorted(tests_run))

        tests_run = get_tests_run(['http/tests/passes'])
        self.assertEqual(tests_run, sorted(tests_run))

    def test_virtual(self):
        self.assertTrue(passing_run(['passes/text.html', 'passes/args.html',
                                     'virtual/passes/text.html', 'virtual/passes/args.html']))

    def test_reftest_run(self):
        tests_run = get_tests_run(['passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
        tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_expected_html_should_be_ignored(self):
        tests_run = get_tests_run(['passes/reftest-expected.html'])
        self.assertEqual([], tests_run)

    def test_reftest_driver_should_run_expected_html(self):
        tests_run = get_test_results(['passes/reftest.html'])
        self.assertEqual(tests_run[0].references, ['passes/reftest-expected.html'])

    def test_reftest_driver_should_run_expected_mismatch_html(self):
        tests_run = get_test_results(['passes/mismatch.html'])
        self.assertEqual(tests_run[0].references, ['passes/mismatch-expected-mismatch.html'])

    def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
        host = MockHost()
        _, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        results = parse_full_results(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        self.assertEqual(results["tests"]["reftests"]["foo"]["unlistedtest.html"]["actual"], "MISSING")
        self.assertEqual(results["num_regressions"], 5)
        self.assertEqual(results["num_flaky"], 0)

    def test_reftest_crash(self):
        test_results = get_test_results(['failures/unexpected/crash-reftest.html'])
        # The list of references should be empty since the test crashed and we didn't run any references.
        self.assertEqual(test_results[0].references, [])

    def test_reftest_with_virtual_reference(self):
        _, err, _ = logging_run(['--details', 'virtual/virtual_passes/passes/reftest.html'], tests_included=True)
        self.assertTrue('ref: virtual/virtual_passes/passes/reftest-expected.html' in err.getvalue())

    def test_additional_platform_directory(self):
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))
        self.assertTrue(passing_run(['--additional-platform-directory', 'foo']))

    def test_additional_expectations(self):
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) failures/unexpected/mismatch.html [ ImageOnlyFailure ]\n')
        self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
                                    tests_included=True, host=host))

    @staticmethod
    def has_test_of_type(tests, type):
        return [t for t in tests if type in t]

    def test_platform_directories_ignored_when_searching_for_tests(self):
        tests_run = get_tests_run(['--platform', 'test-mac-leopard'])
        self.assertFalse('platform/test-mac-leopard/http/test.html' in tests_run)
        self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)

    def test_platform_directories_not_searched_for_additional_tests(self):
        tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'])
        self.assertFalse('platform/test-mac-leopard/http/test.html' in tests_run)
        self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)

    def test_output_diffs(self):
        # Test to ensure that we don't generate -wdiff.html or -pretty.html if wdiff and PrettyPatch
        # aren't available.
        host = MockHost()
        _, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertTrue(any(f.endswith('-diff.txt') for f in written_files.keys()))
        self.assertFalse(any(f.endswith('-wdiff.html') for f in written_files.keys()))
        self.assertFalse(any(f.endswith('-pretty-diff.html') for f in written_files.keys()))

        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        full_results = json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", ""))
        self.assertEqual(full_results['has_wdiff'], False)
        self.assertEqual(full_results['has_pretty_patch'], False)

    def test_unsupported_platform(self):
        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        res = run_webkit_tests.main(['--platform', 'foo'], stdout, stderr)

        self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
        self.assertEqual(stdout.getvalue(), '')
        self.assertTrue('unsupported platform' in stderr.getvalue())

    def test_build_check(self):
        # By using a port_name for a different platform than the one we're running on, the build check should always fail.
        if sys.platform == 'darwin':
            port_name = 'linux-x86'
        else:
            port_name = 'mac-lion'
        out = StringIO.StringIO()
        err = StringIO.StringIO()
        self.assertEqual(run_webkit_tests.main(['--platform', port_name, 'fast/harness/results.html'], out, err), test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)

    def test_verbose_in_child_processes(self):
        # When we actually run multiple processes, we may have to reconfigure logging in the
        # child process (e.g., on win32) and we need to make sure that works and we still
        # see the verbose log output. However, we can't use logging_run() because using
        # outputcapture to capture stdout and stderr later results in a nonpicklable host.

        # Test is flaky on Windows: https://bugs.webkit.org/show_bug.cgi?id=98559
        if not self.should_test_processes:
            return

        options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
        host = MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)
        logging_stream = StringIO.StringIO()
        run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
        self.assertTrue('text.html passed' in logging_stream.getvalue())
        self.assertTrue('image.html passed' in logging_stream.getvalue())

    def disabled_test_driver_logging(self):
        # FIXME: Figure out how to either use a mock-test port to
        # get output or make mock ports work again.
        host = Host()
        _, err, _ = logging_run(['--platform', 'mock-win', '--driver-logging', 'fast/harness/results.html'],
                                tests_included=True, host=host)
        self.assertTrue('OUT:' in err.getvalue())

    def test_write_full_results_to(self):
        host = MockHost()
        details, _, _ = logging_run(['--write-full-results-to', '/tmp/full_results.json'], host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue(host.filesystem.exists('/tmp/full_results.json'))


class EndToEndTest(unittest.TestCase):
    def test_reftest_with_two_notrefs(self):
        # Test that reftests with multiple references are handled correctly:
        # tests whose references all succeed should not appear in the failing
        # results, and failing tests should record their reftest types.
        host = MockHost()
        _, _, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()

        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
        results = parse_full_results(json_string)
        self.assertTrue("multiple-match-success.html" not in results["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-mismatch-success.html" not in results["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-both-success.html" not in results["tests"]["reftests"]["foo"])

        self.assertEqual(results["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "is_unexpected": True})
        self.assertEqual(results["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="], "is_unexpected": True})
        self.assertEqual(results["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="], "is_unexpected": True})


class RebaselineTest(unittest.TestCase, StreamTestingMixin):
    def assertBaselines(self, file_list, file, extensions, err):
        """Assert that file_list contains the expected baselines."""
        for ext in extensions:
            baseline = file + "-expected" + ext
            baseline_msg = 'Writing new expected result "%s"\n' % baseline
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
            self.assertContains(err, baseline_msg)

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.

    def test_reset_results(self):
        # Test that --reset-results updates baselines in place; if a baseline
        # is missing, a new one is written to the generic location.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--reset-results', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 8)
        self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)

    922     def test_missing_results(self):
    923         # Test that we update expectations in place. If the expectation
    924         # is missing, update the expected generic location.
    925         host = MockHost()
    926         details, err, _ = logging_run(['--no-show-results',
    927             'failures/unexpected/missing_text.html',
    928             'failures/unexpected/missing_image.html',
    929             'failures/unexpected/missing_render_tree_dump.html'],
    930             tests_included=True, host=host, new_results=True)
    931         file_list = host.filesystem.written_files.keys()
    932         self.assertEqual(details.exit_code, 3)
    933         self.assertEqual(len(file_list), 10)
    934         self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
    935         self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
    936         self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)

    def test_missing_results_not_added_if_expected_missing(self):
        # Test that new baselines are not written for tests whose (missing)
        # results are already expected to be missing or to need a rebaseline.
        host = MockHost()
        options, parsed_args = run_webkit_tests.parse_args([])

        port = test.TestPort(host, options=options)
        host.filesystem.write_text_file(port.path_to_generic_test_expectations_file(), """
Bug(foo) failures/unexpected/missing_text.html [ Missing ]
Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ]
Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
""")
        details, err, _ = logging_run(['--no-show-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_audio.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, host=host, new_results=True, port_obj=port)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 7)
        self.assertFalse(any('failures/unexpected/missing_text-expected' in f for f in file_list))
        self.assertFalse(any('failures/unexpected/missing_image-expected' in f for f in file_list))
        self.assertFalse(any('failures/unexpected/missing_render_tree_dump-expected' in f for f in file_list))

    def test_missing_results_not_added_if_expected_missing_and_reset_results(self):
        # Test that --reset-results overrides [ Missing ] and rebaseline
        # expectations and writes new baselines anyway.
        host = MockHost()
        options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--reset-results'])

        port = test.TestPort(host, options=options)
        host.filesystem.write_text_file(port.path_to_generic_test_expectations_file(), """
Bug(foo) failures/unexpected/missing_text.html [ Missing ]
Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ]
Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
""")
        details, err, _ = logging_run(['--pixel-tests', '--reset-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_audio.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, host=host, new_results=True, port_obj=port)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 11)
        self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
        self.assertBaselines(file_list, "failures/unexpected/missing_image", [".png"], err)
        self.assertBaselines(file_list, "failures/unexpected/missing_render_tree_dump", [".txt"], err)

    def test_new_baseline(self):
        # Test that we update the platform expectations in the version-specific directories
        # for both existing and new baselines.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--new-baseline', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 8)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)


class PortTest(unittest.TestCase):
    def assert_mock_port_works(self, port_name, args=None):
        # Avoid a mutable default argument; fall back to an empty list.
        args = args or []
        self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host()))

    def disabled_test_mac_lion(self):
        self.assert_mock_port_works('mac-lion')
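
    # A hedged companion sketch passing extra args through the helper above
    # ('--pixel-tests' is the only flag assumed here); disabled like its
    # sibling, since mock-mac-lion runs are currently disabled:
    def disabled_test_mac_lion_pixel_tests(self):
        self.assert_mock_port_works('mac-lion', args=['--pixel-tests'])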


class MainTest(unittest.TestCase):
    def test_exception_handling(self):
        orig_run_fn = run_webkit_tests.run

        # unused args pylint: disable=W0613
        def interrupting_run(port, options, args, stderr):
            raise KeyboardInterrupt

        def successful_run(port, options, args, stderr):

            class FakeRunDetails(object):
                exit_code = test_run_results.UNEXPECTED_ERROR_EXIT_STATUS

            return FakeRunDetails()

        def exception_raising_run(port, options, args, stderr):
            assert False

        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        try:
            run_webkit_tests.run = interrupting_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, test_run_results.INTERRUPTED_EXIT_STATUS)

            run_webkit_tests.run = successful_run
            res = run_webkit_tests.main(['--platform', 'test'], stdout, stderr)
            self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)

            run_webkit_tests.run = exception_raising_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
        finally:
            run_webkit_tests.run = orig_run_fn

    def test_buildbot_results_are_printed_on_early_exit(self):
        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        res = run_webkit_tests.main(['--platform', 'test', '--exit-after-n-failures', '1',
                                     'failures/unexpected/missing_text.html',
                                     'failures/unexpected/missing_image.html'],
                                    stdout, stderr)
        self.assertEqual(res, test_run_results.EARLY_EXIT_STATUS)
        self.assertEqual(stdout.getvalue(),
                ('\n'
                 'Regressions: Unexpected missing results (1)\n'
                 '  failures/unexpected/missing_image.html [ Missing ]\n\n'))
   1062