# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor (at] inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import codecs
import json
import logging
import os
import platform
import Queue
import re
import StringIO
import sys
import thread
import threading
import time
import webkitpy.thirdparty.unittest2 as unittest

from webkitpy.common.system import outputcapture, path
from webkitpy.common.system.crashlogs_unittest import make_mock_crash_report_darwin
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.host import Host
from webkitpy.common.host_mock import MockHost

from webkitpy.layout_tests import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.port import Port
from webkitpy.layout_tests.port import test
from webkitpy.test.skip import skip_if
from webkitpy.tool import grammar
from webkitpy.tool.mocktool import MockOptions


def parse_args(extra_args=None, tests_included=False, new_results=False, print_nothing=True):
    extra_args = extra_args or []
    args = []
    if '--platform' not in extra_args:
        args.extend(['--platform', 'test'])
    if not new_results:
        args.append('--no-new-test-results')

    if '--child-processes' not in extra_args:
        args.extend(['--child-processes', '1'])
    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])
    return run_webkit_tests.parse_args(args)
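

# For illustration: with the defaults (no extra_args, tests_included=False,
# new_results=False), parse_args() above builds an argv equivalent to
#   ['--platform', 'test', '--no-new-test-results', '--child-processes', '1',
#    'passes', 'http/tests', 'websocket/tests', 'failures/expected/*']
# before handing it to run_webkit_tests.parse_args().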


def passing_run(extra_args=None, port_obj=None, tests_included=False, host=None, shared_port=True):
    options, parsed_args = parse_args(extra_args, tests_included)
    if not port_obj:
        host = host or MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

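    # Make every port_factory.get() call return this same port object so that
    # all of the workers share it rather than constructing their own.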
    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj

    logging_stream = StringIO.StringIO()
    run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    return run_details.exit_code == 0


def logging_run(extra_args=None, port_obj=None, tests_included=False, host=None, new_results=False, shared_port=True):
    options, parsed_args = parse_args(extra_args=extra_args,
                                      tests_included=tests_included,
                                      print_nothing=False, new_results=new_results)
    host = host or MockHost()
    if not port_obj:
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    run_details, output = run_and_capture(port_obj, options, parsed_args, shared_port)
    return (run_details, output, host.user)


def run_and_capture(port_obj, options, parsed_args, shared_port=True):
    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
    oc = outputcapture.OutputCapture()
    try:
        oc.capture_output()
        logging_stream = StringIO.StringIO()
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()
    return (run_details, logging_stream)


def get_tests_run(args, host=None):
    results = get_test_results(args, host)
    return [result.test_name for result in results]


def get_test_batches(args, host=None):
    results = get_test_results(args, host)
    batches = []
    batch = []
    current_pid = None
    for result in results:
        if batch and result.pid != current_pid:
            batches.append(batch)
            batch = []
        batch.append(result.test_name)
        current_pid = result.pid
    if batch:
        batches.append(batch)
    return batches
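

# A worked example of the grouping above, with hypothetical pids: results
# from pids [5, 5, 6] for tests [a.html, b.html, c.html] batch as
# [[a.html, b.html], [c.html]] -- consecutive results from the same worker
# process land in the same batch.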


def get_test_results(args, host=None):
    options, parsed_args = parse_args(args, tests_included=True)

    host = host or MockHost()
    port_obj = host.port_factory.get(port_name=options.platform, options=options)

    oc = outputcapture.OutputCapture()
    oc.capture_output()
    logging_stream = StringIO.StringIO()
    try:
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()

    all_results = []
    if run_details.initial_results:
        all_results.extend(run_details.initial_results.all_results)

    if run_details.retry_results:
        all_results.extend(run_details.retry_results.all_results)
    return all_results


def parse_full_results(full_results_text):
    json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
    compressed_results = json.loads(json_to_eval)
    return compressed_results
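

# A minimal sketch of the inverse operation, for reference: full_results.json
# is written in a JSONP-style ADD_RESULTS(...) wrapper, presumably so that
# results.html can load it via a <script> tag. This helper is illustrative
# only and is not used by the tests in this file.
def _wrap_full_results(results_dict):
    return 'ADD_RESULTS(%s);' % json.dumps(results_dict)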


class StreamTestingMixin(object):
    def assertContains(self, stream, string):
        self.assertTrue(string in stream.getvalue())

    def assertEmpty(self, stream):
        self.assertFalse(stream.getvalue())

    def assertNotEmpty(self, stream):
        self.assertTrue(stream.getvalue())
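

# Usage sketch: the mixin only assumes the stream passed in has getvalue(),
# so a plain StringIO stands in for the captured logging stream, e.g.
# (inside a TestCase that mixes this in):
#   stream = StringIO.StringIO()
#   stream.write('Retrying 1 test ...')
#   self.assertContains(stream, 'Retrying')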


class RunTest(unittest.TestCase, StreamTestingMixin):
    def setUp(self):
        # A real PlatformInfo object is used here instead of a
        # MockPlatformInfo because we need to actually check for
        # Windows and Mac to skip some tests.
        self._platform = SystemHost().platform

        # FIXME: Remove this when we fix test-webkitpy to work
        # properly on cygwin (bug 63846).
        self.should_test_processes = not self._platform.is_win()

    def test_basic(self):
        options, args = parse_args(tests_included=True)
        logging_stream = StringIO.StringIO()
        host = MockHost()
        port_obj = host.port_factory.get(options.platform, options)
        details = run_webkit_tests.run(port_obj, options, args, logging_stream)

        # These numbers will need to be updated whenever we add new tests.
        self.assertEqual(details.initial_results.total, test.TOTAL_TESTS)
        self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS)
        self.assertEqual(len(details.initial_results.unexpected_results_by_name), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
        self.assertEqual(details.retry_results.total, test.UNEXPECTED_FAILURES)

        expected_tests = details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name)
        expected_summary_str = ''
        if details.initial_results.expected_failures > 0:
            expected_summary_str = " (%d passed, %d didn't)" % (expected_tests - details.initial_results.expected_failures, details.initial_results.expected_failures)
        one_line_summary = "%d tests ran as expected%s, %d didn't:\n" % (
            expected_tests,
            expected_summary_str,
            len(details.initial_results.unexpected_results_by_name))
        self.assertTrue(one_line_summary in logging_stream.buflist)

        # Ensure the results were summarized properly.
        self.assertEqual(details.summarized_failing_results['num_regressions'], details.exit_code)

        # Ensure the results were written out and displayed.
        failing_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
        json_to_eval = failing_results_text.replace("ADD_RESULTS(", "").replace(");", "")
        self.assertEqual(json.loads(json_to_eval), details.summarized_failing_results)

        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertEqual(json.loads(full_results_text), details.summarized_full_results)

        self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_batch_size(self):
        batch_tests_run = get_test_batches(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_max_locked_shards(self):
        # Tests for the default of using one locked shard even in the case of more than one child process.
        if not self.should_test_processes:
            return
        save_env_webkit_test_max_locked_shards = None
        if "WEBKIT_TEST_MAX_LOCKED_SHARDS" in os.environ:
            save_env_webkit_test_max_locked_shards = os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
            del os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
        _, regular_output, _ = logging_run(['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
        try:
            self.assertTrue(any(['1 locked' in line for line in regular_output.buflist]))
        finally:
            if save_env_webkit_test_max_locked_shards:
                os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"] = save_env_webkit_test_max_locked_shards

    def test_child_processes_2(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
            self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))

    def test_child_processes_min(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2', '-i', 'passes/passes', 'passes'],
                tests_included=True, shared_port=False)
            self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))

    def test_dryrun(self):
        tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(tests_run, [])

        tests_run = get_tests_run(['-n'])
        self.assertEqual(tests_run, [])

    def test_exception_raised(self):
        # Exceptions raised by a worker are treated differently depending on
        # whether the worker is in-process or out. Inline exceptions propagate
        # as normal, which gives us the full stack trace and traceback from
        # the worker. The downside is that the error could be anything, but
        # that is actually useful in testing.
        #
        # Exceptions raised in a separate process are re-packaged into
        # WorkerExceptions (a subclass of BaseException), which carry a string
        # capture of the stack that can be printed but doesn't display
        # properly in the unit test exception handlers.
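        #
        # A rough sketch of that re-packaging (hypothetical names, shown for
        # illustration only): the worker formats its stack into a plain
        # string, since traceback objects cannot be pickled across the
        # process boundary, and the parent re-raises around that string.
        #
        #   try:
        #       run_one_test()                     # in the worker process
        #   except Exception:
        #       stack = traceback.format_exc()     # picklable string capture
        #       raise WorkerException(stack)       # re-raised in the parent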
        self.assertRaises(BaseException, logging_run,
            ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)

        if self.should_test_processes:
            self.assertRaises(BaseException, logging_run,
                ['--child-processes', '2', '--skipped=ignore', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)

    def test_full_results_html(self):
        host = MockHost()
        details, _, _ = logging_run(['--full-results-html'], host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(host.user.opened_urls), 1)

    def test_hung_thread(self):
        details, err, _ = logging_run(['--run-singly', '--time-out-ms=50', 'failures/expected/hang.html'], tests_included=True)
        # Note that hang.html is marked as WontFix and all WontFix tests are
        # expected to Pass, so that actually running them generates an "unexpected" error.
        self.assertEqual(details.exit_code, 1)
        self.assertNotEmpty(err)

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        self.assertRaises(KeyboardInterrupt, logging_run, ['failures/expected/keyboard.html', '--child-processes', '1'], tests_included=True)

        if self.should_test_processes:
            self.assertRaises(KeyboardInterrupt, logging_run,
                ['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--skipped=ignore'], tests_included=True, shared_port=False)

    def test_no_tests_found(self):
        details, err, _ = logging_run(['resources'], tests_included=True)
        self.assertEqual(details.exit_code, -1)
        self.assertContains(err, 'No tests to run.\n')

    def test_no_tests_found_2(self):
        details, err, _ = logging_run(['foo'], tests_included=True)
        self.assertEqual(details.exit_code, -1)
        self.assertContains(err, 'No tests to run.\n')

    def test_natural_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'], tests_run)

    def test_natural_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)

    def test_random_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(sorted(tests_to_run), sorted(tests_run))

    def test_random_daily_seed_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random-seeded'] + tests_to_run)
        self.assertEqual(sorted(tests_to_run), sorted(tests_run))

    def test_random_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(tests_run.count('passes/audio.html'), 2)
        self.assertEqual(tests_run.count('passes/args.html'), 2)

    def test_no_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_with_directory_entries_in_natural_order(self):
        tests_to_run = ['http/tests/ssl', 'perf/foo', 'http/tests/passes']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'perf/foo/test.html', 'http/tests/passes/image.html', 'http/tests/passes/text.html'])

    def test_repeat_each(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])

    def test_ignore_flag(self):
        # Note that passes/image.html is expected to be run since we specified it directly.
        tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'])
        self.assertFalse('passes/text.html' in tests_run)
        self.assertTrue('passes/image.html' in tests_run)

    def test_skipped_flag(self):
        tests_run = get_tests_run(['passes'])
        self.assertFalse('passes/skipped/skip.html' in tests_run)
        num_tests_run_by_default = len(tests_run)

        # Check that nothing changes when we specify skipped=default.
        self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
                         num_tests_run_by_default)

        # Now check that we run one more test (the skipped one).
        tests_run = get_tests_run(['--skipped=ignore', 'passes'])
        self.assertTrue('passes/skipped/skip.html' in tests_run)
        self.assertEqual(len(tests_run), num_tests_run_by_default + 1)

        # Now check that we only run the skipped test.
        self.assertEqual(get_tests_run(['--skipped=only', 'passes']), ['passes/skipped/skip.html'])

        # Now check that we don't run anything.
        self.assertEqual(get_tests_run(['--skipped=always', 'passes/skipped/skip.html']), [])

    def test_iterations(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--iterations', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

    def test_repeat_each_iterations_num_tests(self):
        # The total number of tests should be: number_of_tests *
        # repeat_each * iterations
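        # Here that is 2 tests * 4 repeat-each * 2 iterations = 16 runs
        # (8 expected passes and 8 expected failures), matching the log
        # message asserted below.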
        host = MockHost()
        _, err, _ = logging_run(
            ['--iterations', '2', '--repeat-each', '4', '--debug-rwt-logging', 'passes/text.html', 'failures/expected/text.html'],
            tests_included=True, host=host)
        self.assertContains(err, "All 16 tests ran as expected (8 passed, 8 didn't).\n")

    def test_run_chunk(self):
        # Test that we actually select the right chunk
        all_tests_run = get_tests_run(['passes', 'failures'])
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4', 'passes', 'failures'])
        self.assertEqual(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run)
        self.assertEqual(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_part(self):
        # Test that we actually select the right part
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_test_batches(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEqual(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_skip_failing_tests(self):
        # This tests that we skip both known failing and known flaky tests. Because there are
        # no known flaky tests in the default test_expectations, we add additional expectations.
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) passes/image.html [ ImageOnlyFailure Pass ]\n')

        batches = get_test_batches(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
        has_passes_text = False
        for batch in batches:
            self.assertFalse('failures/expected/text.html' in batch)
            self.assertFalse('passes/image.html' in batch)
            has_passes_text = has_passes_text or ('passes/text.html' in batch)
        self.assertTrue(has_passes_text)

    def test_run_singly_actually_runs_tests(self):
        details, _, _ = logging_run(['--run-singly'], tests_included=True)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 1)  # failures/expected/hang.html actually passes w/ --run-singly.

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'])
        self.assertEqual(tests_run, ['passes/text.html'])

    def test_single_file_with_prefix(self):
        tests_run = get_tests_run(['LayoutTests/passes/text.html'])
        self.assertEqual(['passes/text.html'], tests_run)

    def test_single_skipped_file(self):
        tests_run = get_tests_run(['failures/expected/keybaord.html'])
        self.assertEqual([], tests_run)

    def test_stderr_is_saved(self):
        host = MockHost()
        self.assertTrue(passing_run(host=host))
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
                         'stuff going to stderr')

    def test_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)
        host.filesystem.remove(filename)
        details, err, user = logging_run(['--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, -1)
        self.assertNotEmpty(err)

    def test_test_list_with_prefix(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'LayoutTests/passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)

    def test_missing_and_unexpected_results(self):
        # Test that missing and unexpected results are both reported as
        # regressions in full_results.json.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results', '--retry-failures',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 2)
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find('"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","is_unexpected":true') != -1)
        self.assertTrue(json_string.find('"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_unexpected":true') != -1)
        self.assertTrue(json_string.find('"num_regressions":2') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)

    def test_pixel_test_directories(self):
        host = MockHost()

        # Both tests have a failing checksum. We include only the first in the
        # pixel-test directories, so only it should fail.
        args = ['--pixel-tests', '--retry-failures', '--pixel-test-directory', 'failures/unexpected/pixeldir',
                'failures/unexpected/pixeldir/image_in_pixeldir.html',
                'failures/unexpected/image_not_in_pixeldir.html']
        details, err, _ = logging_run(extra_args=args, host=host, tests_included=True)

        self.assertEqual(details.exit_code, 1)
        expected_token = '"pixeldir":{"image_in_pixeldir.html":{"expected":"PASS","actual":"IMAGE","is_unexpected":true'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)

    def test_crash_with_stderr(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true,"is_unexpected":true') != -1)

    def test_no_image_failure_with_image_diff(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)

    def test_exit_after_n_failures_upload(self):
        host = MockHost()
        details, regular_output, user = logging_run(
            ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
            tests_included=True, host=host)

        # The incremental results file is written during the run and deleted
        # on exit, so it must not still exist here.
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))

        # This checks that we report only the number of tests that actually failed.
        self.assertEqual(details.exit_code, 1)

        # This checks that passes/text.html is considered SKIPped.
        self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        # This checks that we told the user we bailed out.
        self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())

        # This checks that neither test ran as expected.
        # FIXME: This log message is confusing; tests that were skipped should be called out separately.
        self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/text.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_results_directory_absolute(self):
        # We run a configuration that should fail so that output is generated,
        # then check which results URL was opened.

        host = MockHost()
        with host.filesystem.mkdtemp() as tmpdir:
            _, _, user = logging_run(['--results-directory=' + str(tmpdir)], tests_included=True, host=host)
            self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, host.filesystem.join(tmpdir, 'results.html'))])

    def test_results_directory_default(self):
        # We run a configuration that should fail so that output is generated,
        # then check which results URL was opened.

        # This is the default location.
        _, _, user = logging_run(tests_included=True)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_results_directory_relative(self):
        # We run a configuration that should fail so that output is generated,
        # then check which results URL was opened.
        host = MockHost()
        host.filesystem.maybe_make_directory('/tmp/cwd')
        host.filesystem.chdir('/tmp/cwd')
        _, _, user = logging_run(['--results-directory=foo'], tests_included=True, host=host)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])

    def test_retrying_default_value(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertFalse('Retrying' in err.getvalue())

        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 16)
        self.assertTrue('Retrying' in err.getvalue())

    def test_retrying_default_value_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'failures/unexpected/text-image-checksum.html\nfailures/unexpected/crash.html')
        details, err, _ = logging_run(['--debug-rwt-logging', '--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 2)
        self.assertFalse('Retrying' in err.getvalue())

        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'failures')
        details, err, _ = logging_run(['--debug-rwt-logging', '--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 16)
        self.assertTrue('Retrying' in err.getvalue())

    def test_retrying_and_flaky_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', '--retry-failures', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))
        self.assertEqual(len(host.user.opened_urls), 0)

        # Now we test that --clobber-old-results does remove the old entries and the old retries,
        # and that we don't retry again.
        host = MockHost()
        details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Clobbering old results' in err.getvalue())
        self.assertTrue('flaky/text.html' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('retries'))
        self.assertEqual(len(host.user.opened_urls), 1)

    def test_retrying_crashed_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/crash.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Retrying' in err.getvalue())

    def test_retrying_force_pixel_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--no-pixel-tests', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        json = parse_full_results(json_string)
        self.assertEqual(json["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
            {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "is_unexpected": True})
        self.assertFalse(json["pixel_tests_enabled"])
        self.assertEqual(details.enabled_pixel_tests_in_retry, True)

    def test_retrying_uses_retries_directory(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))

    def test_run_order__inline(self):
        # These tests check that we run tests in ascending alphabetical order
        # within each directory. HTTP tests are sharded separately from other
        # tests, so we have to check both.
        tests_run = get_tests_run(['-i', 'passes/passes', 'passes'])
        self.assertEqual(tests_run, sorted(tests_run))

        tests_run = get_tests_run(['http/tests/passes'])
        self.assertEqual(tests_run, sorted(tests_run))

    def test_virtual(self):
        self.assertTrue(passing_run(['passes/text.html', 'passes/args.html',
                                     'virtual/passes/text.html', 'virtual/passes/args.html']))

    def test_reftest_run(self):
        tests_run = get_tests_run(['passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
        tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_expected_html_should_be_ignored(self):
        tests_run = get_tests_run(['passes/reftest-expected.html'])
        self.assertEqual([], tests_run)

    def test_reftest_driver_should_run_expected_html(self):
        tests_run = get_test_results(['passes/reftest.html'])
        self.assertEqual(tests_run[0].references, ['passes/reftest-expected.html'])

    def test_reftest_driver_should_run_expected_mismatch_html(self):
        tests_run = get_test_results(['passes/mismatch.html'])
        self.assertEqual(tests_run[0].references, ['passes/mismatch-expected-mismatch.html'])

    def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
        host = MockHost()
        _, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        results = parse_full_results(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        self.assertEqual(results["tests"]["reftests"]["foo"]["unlistedtest.html"]["actual"], "MISSING")
        self.assertEqual(results["num_regressions"], 5)
        self.assertEqual(results["num_flaky"], 0)

    def test_additional_platform_directory(self):
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))
        self.assertTrue(passing_run(['--additional-platform-directory', 'foo']))

    def test_additional_expectations(self):
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) failures/unexpected/mismatch.html [ ImageOnlyFailure ]\n')
        self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
                                    tests_included=True, host=host))

    @staticmethod
    def has_test_of_type(tests, type):
        return [test for test in tests if type in test]

    def test_platform_directories_ignored_when_searching_for_tests(self):
        tests_run = get_tests_run(['--platform', 'test-mac-leopard'])
        self.assertFalse('platform/test-mac-leopard/http/test.html' in tests_run)
        self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)

    def test_platform_directories_not_searched_for_additional_tests(self):
        tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'])
        self.assertFalse('platform/test-mac-leopard/http/test.html' in tests_run)
        self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)

    def test_output_diffs(self):
        # Test to ensure that we don't generate -wdiff.html or -pretty.html if wdiff and PrettyPatch
        # aren't available.
        host = MockHost()
        _, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertTrue(any(path.endswith('-diff.txt') for path in written_files.keys()))
        self.assertFalse(any(path.endswith('-wdiff.html') for path in written_files.keys()))
        self.assertFalse(any(path.endswith('-pretty-diff.html') for path in written_files.keys()))

        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        full_results = json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", ""))
        self.assertEqual(full_results['has_wdiff'], False)
        self.assertEqual(full_results['has_pretty_patch'], False)

    def test_unsupported_platform(self):
        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        res = run_webkit_tests.main(['--platform', 'foo'], stdout, stderr)

        self.assertEqual(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
        self.assertEqual(stdout.getvalue(), '')
        self.assertTrue('unsupported platform' in stderr.getvalue())

    def test_build_check(self):
        # By using a port_name for a different platform than the one we're running on, the build check should always fail.
        if sys.platform == 'darwin':
            port_name = 'linux-x86'
        else:
            port_name = 'mac-lion'
        out = StringIO.StringIO()
        err = StringIO.StringIO()
        self.assertEqual(run_webkit_tests.main(['--platform', port_name, 'fast/harness/results.html'], out, err), -1)

    def test_verbose_in_child_processes(self):
        # When we actually run multiple processes, we may have to reconfigure
        # logging in the child process (e.g., on win32), and we need to make
        # sure that works and that we still see the verbose log output.
        # However, we can't use logging_run() here because using outputcapture
        # to capture stdout and stderr later results in a nonpicklable host.

        # Test is flaky on Windows: https://bugs.webkit.org/show_bug.cgi?id=98559
        if not self.should_test_processes:
            return

        options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
        host = MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)
        logging_stream = StringIO.StringIO()
        run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
        self.assertTrue('text.html passed' in logging_stream.getvalue())
        self.assertTrue('image.html passed' in logging_stream.getvalue())


class EndToEndTest(unittest.TestCase):
    def test_reftest_with_two_notrefs(self):
        # Test that reftests with multiple match/mismatch references are
        # summarized correctly: passing variants stay out of the failing
        # results, and failing variants record their reftest_type.
        host = MockHost()
        _, _, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()

        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
        json = parse_full_results(json_string)
        self.assertTrue("multiple-match-success.html" not in json["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-mismatch-success.html" not in json["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-both-success.html" not in json["tests"]["reftests"]["foo"])

        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "is_unexpected": True})
        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="], "is_unexpected": True})
        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="], "is_unexpected": True})


class RebaselineTest(unittest.TestCase, StreamTestingMixin):
    def assertBaselines(self, file_list, file, extensions, err):
        """Assert that file_list contains the expected baselines and that
        their creation was logged to err."""
        for ext in extensions:
            baseline = file + "-expected" + ext
            baseline_msg = 'Writing new expected result "%s"\n' % baseline
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
            self.assertContains(err, baseline_msg)

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.

    def test_reset_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--reset-results', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 8)
        self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)

    def test_missing_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 3)
        self.assertEqual(len(file_list), 10)
        self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)

    def test_missing_results_not_added_if_expected_missing(self):
        # Test that we do not write new baselines for tests whose expectations
        # already mark the results as missing or needing a rebaseline.
        host = MockHost()
        options, parsed_args = run_webkit_tests.parse_args([])

        port = test.TestPort(host, options=options)
        host.filesystem.write_text_file(port.path_to_generic_test_expectations_file(), """
Bug(foo) failures/unexpected/missing_text.html [ Missing ]
Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ]
Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
""")
        details, err, _ = logging_run(['--no-show-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_audio.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, host=host, new_results=True, port_obj=port)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 7)
        self.assertFalse(any('failures/unexpected/missing_text-expected' in file for file in file_list))
        self.assertFalse(any('failures/unexpected/missing_image-expected' in file for file in file_list))
        self.assertFalse(any('failures/unexpected/missing_render_tree_dump-expected' in file for file in file_list))

    def test_missing_results_not_added_if_expected_missing_and_reset_results(self):
        # Test that --reset-results writes new baselines even for tests whose
        # expectations mark the results as missing or needing a rebaseline.
        host = MockHost()
        options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--reset-results'])

        port = test.TestPort(host, options=options)
        host.filesystem.write_text_file(port.path_to_generic_test_expectations_file(), """
Bug(foo) failures/unexpected/missing_text.html [ Missing ]
Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ]
Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
""")
        details, err, _ = logging_run(['--pixel-tests', '--reset-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_audio.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, host=host, new_results=True, port_obj=port)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 11)
        self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
        self.assertBaselines(file_list, "failures/unexpected/missing_image", [".png"], err)
        self.assertBaselines(file_list, "failures/unexpected/missing_render_tree_dump", [".txt"], err)

    def test_new_baseline(self):
        # Test that we update the platform expectations in the version-specific directories
        # for both existing and new baselines.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--new-baseline', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 8)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)


class PortTest(unittest.TestCase):
    def assert_mock_port_works(self, port_name, args=[]):
        self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host()))

    def disabled_test_mac_lion(self):
        self.assert_mock_port_works('mac-lion')

    def disabled_test_mac_lion_in_test_shell_mode(self):
        self.assert_mock_port_works('mac-lion', args=['--additional-drt-flag=--test-shell'])

    def disabled_test_qt_linux(self):
        self.assert_mock_port_works('qt-linux')


class MainTest(unittest.TestCase):
    def test_exception_handling(self):
        orig_run_fn = run_webkit_tests.run

        # unused args pylint: disable=W0613
        def interrupting_run(port, options, args, stderr):
            raise KeyboardInterrupt

        def successful_run(port, options, args, stderr):
            class FakeRunDetails(object):
                exit_code = -1

            return FakeRunDetails()

        def exception_raising_run(port, options, args, stderr):
            assert False

        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        try:
            run_webkit_tests.run = interrupting_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, run_webkit_tests.INTERRUPTED_EXIT_STATUS)

            run_webkit_tests.run = successful_run
            res = run_webkit_tests.main(['--platform', 'test'], stdout, stderr)
            self.assertEqual(res, -1)

            run_webkit_tests.run = exception_raising_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
        finally:
            run_webkit_tests.run = orig_run_fn
    975