#!/usr/bin/python
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Unit tests for run_webkit_tests."""

from __future__ import with_statement

import codecs
import itertools
import logging
import os
import Queue
import sys
import thread
import time
import threading
import unittest

try:
    import multiprocessing
except ImportError:
    multiprocessing = None

from webkitpy.common import array_stream
from webkitpy.common.system import outputcapture
from webkitpy.common.system import filesystem_mock
from webkitpy.tool import mocktool
from webkitpy.layout_tests import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.port.test import TestPort, TestDriver
from webkitpy.layout_tests.port.test_files import is_reference_html_file
from webkitpy.python24.versioning import compare_version
from webkitpy.test.skip import skip_if

from webkitpy.thirdparty.mock import Mock


def parse_args(extra_args=None, record_results=False, tests_included=False,
               print_nothing=True):
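    """Build an (options, args) pair for run_webkit_tests with defaults that suit unit tests.

    Unless overridden via extra_args, this selects the 'test' platform and the
    inline worker model, suppresses printing, and disables result recording.
    If tests_included is false, a default set of test paths (including a glob)
    is appended.
    """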
    extra_args = extra_args or []
    if print_nothing:
        args = ['--print', 'nothing']
    else:
        args = []
    if '--platform' not in extra_args:
        args.extend(['--platform', 'test'])
    if not record_results:
        args.append('--no-record-results')
    if '--child-processes' not in extra_args and '--worker-model' not in extra_args:
        args.extend(['--worker-model', 'inline'])
    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])
    return run_webkit_tests.parse_args(args)


def passing_run(extra_args=None, port_obj=None, record_results=False,
                tests_included=False, filesystem=None):
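    """Run run_webkit_tests with the given arguments and return True if it exits with code 0."""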
    options, parsed_args = parse_args(extra_args, record_results,
                                      tests_included)
    if not port_obj:
        port_obj = port.get(port_name=options.platform, options=options,
                            user=mocktool.MockUser(), filesystem=filesystem)
    res = run_webkit_tests.run(port_obj, options, parsed_args)
    return res == 0


def logging_run(extra_args=None, port_obj=None, record_results=False, tests_included=False, filesystem=None):
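    """Run run_webkit_tests with output capturing enabled.

    Returns (exit_code, buildbot_output, regular_output, user).
    """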
    options, parsed_args = parse_args(extra_args=extra_args,
                                      record_results=record_results,
                                      tests_included=tests_included,
                                      print_nothing=False)
    user = mocktool.MockUser()
    if not port_obj:
        port_obj = port.get(port_name=options.platform, options=options,
                            user=user, filesystem=filesystem)

    res, buildbot_output, regular_output = run_and_capture(port_obj, options,
                                                           parsed_args)
    return (res, buildbot_output, regular_output, user)


def run_and_capture(port_obj, options, parsed_args):
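    """Call run_webkit_tests.run() with stdout/stderr captured; return (res, buildbot_output, regular_output)."""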
    oc = outputcapture.OutputCapture()
    try:
        oc.capture_output()
        buildbot_output = array_stream.ArrayStream()
        regular_output = array_stream.ArrayStream()
        res = run_webkit_tests.run(port_obj, options, parsed_args,
                                   buildbot_output=buildbot_output,
                                   regular_output=regular_output)
    finally:
        oc.restore_output()
    return (res, buildbot_output, regular_output)


def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False,
                  filesystem=None, include_reference_html=False):
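    """Run the given tests on a recording port and return the tests that were actually run.

    The result is a list of per-driver test batches, or a single flattened
    list if flatten_batches is true. Reference html files used by reftests
    are only recorded when include_reference_html is true.
    """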
    extra_args = extra_args or []
    if not tests_included:
        # Not including http tests since they get run out of order (that
        # behavior has its own test, see test_get_test_file_queue)
        extra_args = ['passes', 'failures'] + extra_args
    options, parsed_args = parse_args(extra_args, tests_included=True)

    user = mocktool.MockUser()

    test_batches = []


    class RecordingTestDriver(TestDriver):
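        """TestDriver that records each test it is asked to run into test_batches."""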
        def __init__(self, port, worker_number):
            TestDriver.__init__(self, port, worker_number)
            self._current_test_batch = None

        def poll(self):
            # So that we don't create a new driver for every test
            return None

        def stop(self):
            self._current_test_batch = None

        def run_test(self, test_input):
            if self._current_test_batch is None:
                self._current_test_batch = []
                test_batches.append(self._current_test_batch)
            test_name = self._port.relative_test_filename(test_input.filename)
            # A reftest calls the driver's run_test() twice: once for the test
            # and once for its reference. Don't record the reference html file
            # unless include_reference_html is explicitly set.
            if include_reference_html or not is_reference_html_file(test_input.filename):
                self._current_test_batch.append(test_name)
            return TestDriver.run_test(self, test_input)

    class RecordingTestPort(TestPort):
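        """TestPort that hands out RecordingTestDrivers so callers can inspect what ran."""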
        def create_driver(self, worker_number):
            return RecordingTestDriver(self, worker_number)

    recording_port = RecordingTestPort(options=options, user=user, filesystem=filesystem)
    run_and_capture(recording_port, options, parsed_args)

    if flatten_batches:
        return list(itertools.chain(*test_batches))

    return test_batches


class MainTest(unittest.TestCase):
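    """End-to-end tests for run_webkit_tests.run(), driven through the 'test' port."""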
    def test_accelerated_compositing(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-compositing']))
        self.assertTrue(passing_run(['--no-accelerated-compositing']))

    def test_accelerated_2d_canvas(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-2d-canvas']))
        self.assertTrue(passing_run(['--no-accelerated-2d-canvas']))

    def test_basic(self):
        self.assertTrue(passing_run())

    def test_batch_size(self):
        batch_tests_run = get_tests_run(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_child_process_1(self):
        _, _, regular_output, _ = logging_run(
             ['--print', 'config', '--worker-model', 'threads', '--child-processes', '1'])
        self.assertTrue(any(['Running 1 ' in line for line in regular_output.get()]))

    def test_child_processes_2(self):
        _, _, regular_output, _ = logging_run(
             ['--print', 'config', '--worker-model', 'threads', '--child-processes', '2'])
        self.assertTrue(any(['Running 2 ' in line for line in regular_output.get()]))

    def test_child_processes_min(self):
        _, _, regular_output, _ = logging_run(
             ['--print', 'config', '--worker-model', 'threads', '--child-processes', '2', 'passes'],
             tests_included=True)
        self.assertTrue(any(['Running 1 ' in line for line in regular_output.get()]))

    def test_dryrun(self):
        batch_tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(batch_tests_run, [])

        batch_tests_run = get_tests_run(['-n'])
        self.assertEqual(batch_tests_run, [])

    def test_exception_raised(self):
        self.assertRaises(ValueError, logging_run,
            ['failures/expected/exception.html'], tests_included=True)

    def test_full_results_html(self):
        # FIXME: verify html?
        res, out, err, user = logging_run(['--full-results-html'])
        self.assertEqual(res, 0)

    def test_help_printing(self):
        res, out, err, user = logging_run(['--help-printing'])
        self.assertEqual(res, 0)
        self.assertTrue(out.empty())
        self.assertFalse(err.empty())

    def test_hung_thread(self):
        res, out, err, user = logging_run(['--run-singly', '--time-out-ms=50',
                                          'failures/expected/hang.html'],
                                          tests_included=True)
        self.assertEqual(res, 0)
        self.assertFalse(out.empty())
        self.assertFalse(err.empty())

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        self.assertRaises(KeyboardInterrupt, logging_run,
            ['failures/expected/keyboard.html'], tests_included=True)

    def test_keyboard_interrupt_inline_worker_model(self):
        self.assertRaises(KeyboardInterrupt, logging_run,
            ['failures/expected/keyboard.html', '--worker-model', 'inline'],
            tests_included=True)

    def test_last_results(self):
        fs = port.unit_test_filesystem()
        # We do a logging run here instead of a passing run in order to
        # suppress the output from the json generator.
        res, buildbot_output, regular_output, user = logging_run(['--clobber-old-results'], record_results=True, filesystem=fs)
        res, buildbot_output, regular_output, user = logging_run(
            ['--print-last-failures'], filesystem=fs)
        self.assertEqual(regular_output.get(), ['\n\n'])
        self.assertEqual(buildbot_output.get(), [])

    def test_lint_test_files(self):
        res, out, err, user = logging_run(['--lint-test-files'])
        self.assertEqual(res, 0)
        self.assertTrue(out.empty())
        self.assertTrue(any(['Lint succeeded' in msg for msg in err.get()]))

    def test_lint_test_files__errors(self):
        options, parsed_args = parse_args(['--lint-test-files'])
        user = mocktool.MockUser()
        port_obj = port.get(options.platform, options=options, user=user)
        port_obj.test_expectations = lambda: "# syntax error"
        res, out, err = run_and_capture(port_obj, options, parsed_args)

        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue(any(['Lint failed' in msg for msg in err.get()]))

    def test_no_tests_found(self):
        res, out, err, user = logging_run(['resources'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue('No tests to run.\n' in err.get())

    def test_no_tests_found_2(self):
        res, out, err, user = logging_run(['foo'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue('No tests to run.\n' in err.get())

    def test_randomize_order(self):
        # FIXME: verify order was shuffled
        self.assertTrue(passing_run(['--randomize-order']))

    def test_run_chunk(self):
        # Test that we actually select the right chunk
        all_tests_run = get_tests_run(flatten_batches=True)
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4'], flatten_batches=True)
        self.assertEquals(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_force(self):
        # This raises an exception because we run
        # failures/expected/exception.html, which is normally SKIPped.
        self.assertRaises(ValueError, logging_run, ['--force'])

    def test_run_part(self):
        # Test that we actually select the right part
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_tests_run(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEquals(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_run_singly_actually_runs_tests(self):
        res, _, _, _ = logging_run(['--run-singly', 'failures/unexpected'])
        self.assertEquals(res, 5)

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html'], tests_run)

    def test_single_file_with_prefix(self):
        tests_run = get_tests_run(['LayoutTests/passes/text.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html'], tests_run)

    def test_single_skipped_file(self):
        tests_run = get_tests_run(['failures/expected/keybaord.html'], tests_included=True, flatten_batches=True)
        self.assertEquals([], tests_run)

    def test_stderr_is_saved(self):
        fs = port.unit_test_filesystem()
        self.assertTrue(passing_run(filesystem=fs))
        self.assertEquals(fs.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
                          'stuff going to stderr')

    def test_test_list(self):
        fs = port.unit_test_filesystem()
        filename = '/tmp/foo.txt'
        fs.write_text_file(filename, 'passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, filesystem=fs)
        self.assertEquals(['passes/text.html'], tests_run)
        fs.remove(filename)
        res, out, err, user = logging_run(['--test-list=%s' % filename],
                                          tests_included=True, filesystem=fs)
        self.assertEqual(res, -1)
        self.assertFalse(err.empty())

    def test_test_list_with_prefix(self):
        fs = port.unit_test_filesystem()
        filename = '/tmp/foo.txt'
        fs.write_text_file(filename, 'LayoutTests/passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, filesystem=fs)
        self.assertEquals(['passes/text.html'], tests_run)

    def test_unexpected_failures(self):
        # Run tests including the unexpected failures.
        self._url_opened = None
        res, out, err, user = logging_run(tests_included=True)

        # Update this magic number if you add an unexpected test to webkitpy.layout_tests.port.test
        # FIXME: It would be nicer to have a routine in port/test.py that returns this number.
        unexpected_tests_count = 5

        self.assertEqual(res, unexpected_tests_count)
        self.assertFalse(out.empty())
        self.assertFalse(err.empty())
        self.assertEqual(user.opened_urls, ['/tmp/layout-test-results/results.html'])

    def test_exit_after_n_failures_upload(self):
        fs = port.unit_test_filesystem()
        res, buildbot_output, regular_output, user = logging_run([
                'failures/unexpected/text-image-checksum.html',
                'passes/text.html',
                '--exit-after-n-failures', '1',
            ],
            tests_included=True,
            record_results=True,
            filesystem=fs)
        self.assertTrue('/tmp/layout-test-results/incremental_results.json' in fs.files)

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run([
                'failures/unexpected/text-image-checksum.html',
                'passes/text.html',
                '--exit-after-n-failures', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run([
                'failures/expected/text.html',
                'passes/text.html',
                '--exit-after-n-failures', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run([
                'failures/unexpected/crash.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run([
                'failures/unexpected/timeout.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run([
                'failures/expected/crash.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes_inline_worker_model(self):
        tests_run = get_tests_run([
                'failures/unexpected/timeout.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
                '--worker-model', 'inline',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/timeout.html'], tests_run)

    def test_results_directory_absolute(self):
        # Run a configuration that fails, so that results are generated, and
        # then check which results URL was opened.

        fs = port.unit_test_filesystem()
        with fs.mkdtemp() as tmpdir:
            res, out, err, user = logging_run(['--results-directory=' + str(tmpdir)],
                                              tests_included=True, filesystem=fs)
            self.assertEqual(user.opened_urls, [fs.join(tmpdir, 'results.html')])

    def test_results_directory_default(self):
        # Run a configuration that fails, so that results are generated, and
        # then check which results URL was opened.

        # This is the default location.
        res, out, err, user = logging_run(tests_included=True)
        self.assertEqual(user.opened_urls, ['/tmp/layout-test-results/results.html'])

    def test_results_directory_relative(self):
        # Run a configuration that fails, so that results are generated, and
        # then check which results URL was opened.
        fs = port.unit_test_filesystem()
        fs.maybe_make_directory('/tmp/cwd')
        fs.chdir('/tmp/cwd')
        res, out, err, user = logging_run(['--results-directory=foo'],
                                          tests_included=True, filesystem=fs)
        self.assertEqual(user.opened_urls, ['/tmp/cwd/foo/results.html'])

    # These next tests test that we run the tests in ascending alphabetical
    # order per directory. HTTP tests are sharded separately from other tests,
    # so we have to test both.
    def assert_run_order(self, worker_model, child_processes='1'):
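        """Assert that tests run in ascending alphabetical order, for both regular and http tests."""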
        tests_run = get_tests_run(['--worker-model', worker_model,
            '--child-processes', child_processes, 'passes'],
            tests_included=True, flatten_batches=True)
        self.assertEquals(tests_run, sorted(tests_run))

        tests_run = get_tests_run(['--worker-model', worker_model,
            '--child-processes', child_processes, 'http/tests/passes'],
            tests_included=True, flatten_batches=True)
        self.assertEquals(tests_run, sorted(tests_run))

    def test_run_order__inline(self):
        self.assert_run_order('inline')

    def test_tolerance(self):
        class ImageDiffTestPort(TestPort):
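            """TestPort whose diff_image() records the tolerance option that was used."""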
            def diff_image(self, expected_contents, actual_contents,
                           diff_filename=None):
                self.tolerance_used_for_diff_image = self._options.tolerance
                return True

        def get_port_for_run(args):
            options, parsed_args = run_webkit_tests.parse_args(args)
            test_port = ImageDiffTestPort(options=options, user=mocktool.MockUser())
            passing_run(args, port_obj=test_port, tests_included=True)
            return test_port

        base_args = ['--pixel-tests', 'failures/expected/*']

        # If we pass in an explicit tolerance argument, then that will be used.
        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
        self.assertEqual(0, test_port.tolerance_used_for_diff_image)

        # Otherwise the port's default tolerance behavior (including ignoring it)
        # should be used.
        test_port = get_port_for_run(base_args)
        self.assertEqual(None, test_port.tolerance_used_for_diff_image)

    def test_worker_model__inline(self):
        self.assertTrue(passing_run(['--worker-model', 'inline']))

    def test_worker_model__inline_with_child_processes(self):
        res, out, err, user = logging_run(['--worker-model', 'inline',
                                           '--child-processes', '2'])
        self.assertEqual(res, 0)
        self.assertTrue('--worker-model=inline overrides --child-processes\n' in err.get())

    def test_worker_model__processes(self):
        # FIXME: remove this when we fix test-webkitpy to work properly
        # with the multiprocessing module (bug 54520).
        if multiprocessing and sys.platform not in ('cygwin', 'win32'):
            self.assertTrue(passing_run(['--worker-model', 'processes']))

    def test_worker_model__processes_and_dry_run(self):
        if multiprocessing and sys.platform not in ('cygwin', 'win32'):
            self.assertTrue(passing_run(['--worker-model', 'processes', '--dry-run']))

    def test_worker_model__threads(self):
        self.assertTrue(passing_run(['--worker-model', 'threads']))

    def test_worker_model__unknown(self):
        self.assertRaises(ValueError, logging_run,
                          ['--worker-model', 'unknown'])

    def test_reftest_run(self):
        tests_run = get_tests_run(['passes/reftest.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/reftest.html'], tests_run)

    def test_reftest_expected_html_should_be_ignored(self):
        tests_run = get_tests_run(['passes/reftest-expected.html'], tests_included=True, flatten_batches=True)
        self.assertEquals([], tests_run)

    def test_reftest_driver_should_run_expected_html(self):
        tests_run = get_tests_run(['passes/reftest.html'], tests_included=True, flatten_batches=True,
                                  include_reference_html=True)
        self.assertEquals(['passes/reftest.html', 'passes/reftest-expected.html'], tests_run)

    def test_reftest_driver_should_run_expected_mismatch_html(self):
        tests_run = get_tests_run(['passes/mismatch.html'], tests_included=True, flatten_batches=True,
                                  include_reference_html=True)
        self.assertEquals(['passes/mismatch.html', 'passes/mismatch-expected-mismatch.html'], tests_run)

    def test_additional_platform_directory(self):
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo',
            '--additional-platform-directory', '/tmp/bar']))

        res, buildbot_output, regular_output, user = logging_run(
             ['--additional-platform-directory', 'foo'])
        self.assertTrue('--additional-platform-directory=foo is ignored since it is not absolute\n'
                        in regular_output.get())


MainTest = skip_if(MainTest, sys.platform == 'cygwin' and compare_version(sys, '2.6')[0] < 0, 'new-run-webkit-tests tests hang on Cygwin Python 2.5.2')


class RebaselineTest(unittest.TestCase):
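    """Tests for writing new baselines via --reset-results and --new-baseline."""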
    def assertBaselines(self, file_list, file):
        """Assert that file_list contains the baselines for the given test file."""
        for ext in [".txt", ".png", ".checksum"]:
            baseline = file + "-expected" + ext
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.

    def test_reset_results(self):
        # Test that we update expectations in place. If an expectation is
        # missing, it is written to the generic (non-platform) location.
        fs = port.unit_test_filesystem()
        passing_run(['--pixel-tests',
                        '--reset-results',
                        'passes/image.html',
                        'failures/expected/missing_image.html'],
                        tests_included=True, filesystem=fs)
        file_list = fs.written_files.keys()
        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
        self.assertEqual(len(file_list), 6)
        self.assertBaselines(file_list,
            "/passes/image")
        self.assertBaselines(file_list,
            "/failures/expected/missing_image")

    def test_new_baseline(self):
        # Test that we update the platform expectations. If an expectation is
        # missing, a new one is created in the platform directory.
        fs = port.unit_test_filesystem()
        passing_run(['--pixel-tests',
                        '--new-baseline',
                        'passes/image.html',
                        'failures/expected/missing_image.html'],
                    tests_included=True, filesystem=fs)
        file_list = fs.written_files.keys()
        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
        self.assertEqual(len(file_list), 6)
        self.assertBaselines(file_list,
            "/platform/test-mac-leopard/passes/image")
        self.assertBaselines(file_list,
            "/platform/test-mac-leopard/failures/expected/missing_image")


class DryrunTest(unittest.TestCase):
    # FIXME: it's hard to know which platforms are safe to test; the
    # chromium platforms require a chromium checkout, and the mac platform
    # requires fcntl, so it can't be tested on win32, etc. There is
    # probably a better way of handling this.
    def disabled_test_darwin(self):
        if sys.platform != "darwin":
            return

        self.assertTrue(passing_run(['--platform', 'dryrun', 'fast/html'],
                        tests_included=True))
        self.assertTrue(passing_run(['--platform', 'dryrun-mac', 'fast/html'],
                        tests_included=True))

    def test_test(self):
        self.assertTrue(passing_run(['--platform', 'dryrun-test',
                                     '--pixel-tests']))


if __name__ == '__main__':
    unittest.main()