# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import logging
import optparse
import os
import signal
import sys
import traceback

from webkitpy.common.host import Host
from webkitpy.layout_tests.controllers.manager import Manager
from webkitpy.layout_tests.port import configuration_options, platform_options
from webkitpy.layout_tests.views import buildbot_results
from webkitpy.layout_tests.views import printing


_log = logging.getLogger(__name__)


# This mirrors what the shell normally does.
INTERRUPTED_EXIT_STATUS = signal.SIGINT + 128
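# (signal.SIGINT is 2 on POSIX systems, so this typically evaluates to 130.)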

# This is a randomly chosen exit code that can be tested against to
# indicate that an unexpected exception occurred.
EXCEPTIONAL_EXIT_STATUS = 254


def main(argv, stdout, stderr):
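    """Runs the layout tests for the requested port and returns an exit code.

    Parses the command line, picks a real or mock Host, optionally lints the
    test expectations, then runs the tests and prints BuildBot-style results
    to stdout. Unexpected exceptions map to EXCEPTIONAL_EXIT_STATUS, and a
    keyboard interrupt maps to INTERRUPTED_EXIT_STATUS."""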
    options, args = parse_args(argv)

    if options.platform and 'test' in options.platform:
        # It's a bit lame to import mocks into real code, but this allows the user
        # to run tests against the test platform interactively, which is useful for
        # debugging test failures.
        from webkitpy.common.host_mock import MockHost
        host = MockHost()
    else:
        host = Host()

    if options.lint_test_files:
        from webkitpy.layout_tests.lint_test_expectations import lint
        return lint(host, options, stderr)

    try:
        port = host.port_factory.get(options.platform, options)
    except NotImplementedError, e:
        # FIXME: is this the best way to handle unsupported port names?
        print >> stderr, str(e)
        return EXCEPTIONAL_EXIT_STATUS

    try:
        run_details = run(port, options, args, stderr)
        if run_details.exit_code != -1:
            bot_printer = buildbot_results.BuildBotPrinter(stdout, options.debug_rwt_logging)
            bot_printer.print_results(run_details)

        return run_details.exit_code
    except KeyboardInterrupt:
        return INTERRUPTED_EXIT_STATUS
    except BaseException as e:
        if isinstance(e, Exception):
            print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e))
            traceback.print_exc(file=stderr)
        return EXCEPTIONAL_EXIT_STATUS


def parse_args(args):
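    """Builds the grouped optparse options for run-webkit-tests and returns
    the (options, args) pair produced by parsing the given argument list."""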
    option_group_definitions = []

    option_group_definitions.append(("Platform options", platform_options()))
    option_group_definitions.append(("Configuration options", configuration_options()))
    option_group_definitions.append(("Printing Options", printing.print_options()))

    # FIXME: These options should move onto the ChromiumPort.
    option_group_definitions.append(("Chromium-specific Options", [
        optparse.make_option("--nocheck-sys-deps", action="store_true",
            default=False,
            help="Don't check the system dependencies (themes)"),
        optparse.make_option("--adb-device",
            action="append", default=[],
            help="Run Android layout tests on these devices."),
    ]))

    option_group_definitions.append(("Results Options", [
        optparse.make_option("-p", "--pixel", "--pixel-tests", action="store_true",
            dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
        optparse.make_option("--no-pixel", "--no-pixel-tests", action="store_false",
            dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),
        optparse.make_option("--results-directory", help="Location of test results"),
        optparse.make_option("--build-directory",
            help="Path to the directory under which build files are kept (should not include configuration)"),
        optparse.make_option("--add-platform-exceptions", action="store_true", default=False,
            help="Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"),
        optparse.make_option("--new-baseline", action="store_true",
            default=False, help="Save generated results as new baselines "
                 "into the *most-specific-platform* directory, overwriting whatever's "
                 "already there. Equivalent to --reset-results --add-platform-exceptions"),
        optparse.make_option("--reset-results", action="store_true",
            default=False, help="Reset expectations to the "
                 "generated results in their existing location."),
        optparse.make_option("--no-new-test-results", action="store_false",
            dest="new_test_results", default=True,
            help="Don't create new baselines when no expected results exist"),

        # FIXME: we should also support a comma-separated list with --pixel-test-directory.
        optparse.make_option("--pixel-test-directory", action="append", default=[], dest="pixel_test_directories",
            help="A directory in which tests may be run as pixel tests. "
                 "Specify multiple times to add multiple directories. "
                 "This option implies --pixel-tests. If specified, only tests located "
                 "in one of the listed directories are run as pixel tests. Some ports "
                 "may ignore this option, while others have a default value that can "
                 "be overridden here."),

        optparse.make_option("--skip-failing-tests", action="store_true",
            default=False, help="Skip tests that are expected to fail. "
                 "Note: When using this option, you might miss new crashes "
                 "in these tests."),
        optparse.make_option("--additional-drt-flag", action="append",
            default=[], help="Additional command line flag to pass to the driver. "
                 "Specify multiple times to add multiple flags."),
        optparse.make_option("--driver-name", type="string",
            help="Alternative driver binary to use"),
        optparse.make_option("--additional-platform-directory", action="append",
            default=[], help="Additional directory in which to look for test "
                 "baselines (will take precedence over platform baselines). "
                 "Specify multiple times to add multiple search path entries."),
        optparse.make_option("--additional-expectations", action="append", default=[],
            help="Path to a test_expectations file that will override previous expectations. "
                 "Specify multiple times for multiple sets of overrides."),
        optparse.make_option("--compare-port", action="store", default=None,
            help="Use the specified port's baselines first"),
        optparse.make_option("--no-show-results", action="store_false",
            default=True, dest="show_results",
            help="Don't launch a browser with results after the tests "
                 "are done"),
        optparse.make_option("--full-results-html", action="store_true",
            default=False,
            help="Show all failures in results.html, rather than only regressions"),
        optparse.make_option("--clobber-old-results", action="store_true",
            default=False, help="Clobbers test results from previous runs."),
    ]))

    option_group_definitions.append(("Testing Options", [
        optparse.make_option("--build", dest="build",
            action="store_true", default=True,
            help="Check to ensure the build is up-to-date (default)."),
        optparse.make_option("--no-build", dest="build",
            action="store_false", help="Don't check to see if the build is up-to-date."),
        optparse.make_option("-n", "--dry-run", action="store_true",
            default=False,
            help="Do everything but actually run the tests or upload results."),
        optparse.make_option("--wrapper",
            help="wrapper command to insert before invocations of "
                 "the driver; option is split on whitespace before "
                 "running. (Example: --wrapper='valgrind --smc-check=all')"),
        optparse.make_option("-i", "--ignore-tests", action="append", default=[],
            help="directories or tests to ignore (may specify multiple times)"),
        optparse.make_option("--ignore-flaky-tests", action="store",
            help=("Control whether tests that are flaky on the bots get ignored. "
                "'very-flaky' == Ignore any tests that flaked more than once on the bot. "
                "'maybe-flaky' == Ignore any tests that flaked once on the bot. "
                "'unexpected' == Ignore any tests that had unexpected results on the bot.")),
        optparse.make_option("--ignore-builder-category", action="store",
            help=("The category of builders to use with the --ignore-flaky-tests "
                "option ('layout' or 'deps').")),
        optparse.make_option("--test-list", action="append",
            help="read list of tests to run from file", metavar="FILE"),
        optparse.make_option("--skipped", action="store", default="default",
            help=("control how tests marked SKIP are run. "
                 "'default' == Skip tests unless explicitly listed on the command line, "
                 "'ignore' == Run them anyway, "
                 "'only' == only run the SKIP tests, "
                 "'always' == always skip, even if listed on the command line.")),
        optparse.make_option("--time-out-ms",
            help="Set the timeout for each test"),
        optparse.make_option("--order", action="store", default="natural",
            help=("determine the order in which the test cases will be run. "
                  "'none' == use the order in which the tests were listed either in arguments or test list, "
                  "'natural' == use the natural order (default), "
                  "'random-seeded' == randomize the test order using a fixed seed, "
                  "'random' == randomize the test order.")),
        optparse.make_option("--run-chunk",
            help=("Run a specified chunk (n:l), the nth chunk of length l, "
                 "of the layout tests")),
        optparse.make_option("--run-part", help=("Run a specified part (n:m), "
                  "the nth of m parts, of the layout tests")),
        optparse.make_option("--batch-size",
            help=("Run the tests in batches (n); after every n tests, "
                  "the driver is relaunched."), type="int", default=None),
        optparse.make_option("--run-singly", action="store_true",
            default=False, help="run a separate driver for each test (implies --verbose)"),
        optparse.make_option("--child-processes",
            help="Number of drivers to run in parallel."),
        # FIXME: Display default number of child processes that will run.
        optparse.make_option("-f", "--fully-parallel", action="store_true",
            help="run all tests in parallel"),
        optparse.make_option("--exit-after-n-failures", type="int", default=None,
            help="Exit after the first N failures instead of running all "
            "tests"),
        optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int",
            default=None, help="Exit after the first N crashes or timeouts instead of "
            "running all tests"),
        optparse.make_option("--iterations", type="int", default=1, help="Number of times to run the set of tests (e.g. ABCABCABC)"),
        optparse.make_option("--repeat-each", type="int", default=1, help="Number of times to run each test (e.g. AAABBBCCC)"),
        optparse.make_option("--retry-failures", action="store_true",
            help="Re-try any tests that produce unexpected results. Default is to not retry if an explicit list of tests is passed to run-webkit-tests."),
        optparse.make_option("--no-retry-failures", action="store_false",
            dest="retry_failures",
            help="Don't re-try any tests that produce unexpected results."),

        # FIXME: Remove this after we remove the flag from the v8 bot.
        optparse.make_option("--retry-crashes", action="store_true",
            default=False,
            help="ignored (we now always retry crashes when we retry failures)."),

        optparse.make_option("--max-locked-shards", type="int", default=0,
            help="Set the maximum number of locked shards"),
        optparse.make_option("--additional-env-var", type="string", action="append", default=[],
            help="Passes the given environment variable to the tests (--additional-env-var=NAME=VALUE)"),
        optparse.make_option("--profile", action="store_true",
            help="Output per-test profile information."),
        optparse.make_option("--profiler", action="store",
            help="Output per-test profile information, using the specified profiler."),
    ]))

    option_group_definitions.append(("Miscellaneous Options", [
        optparse.make_option("--lint-test-files", action="store_true",
            default=False, help=("Makes sure the test files parse for all "
                                 "configurations. Does not run any tests.")),
    ]))

    # FIXME: Move these into json_results_generator.py
    option_group_definitions.append(("Result JSON Options", [
        optparse.make_option("--master-name", help="The name of the buildbot master."),
        optparse.make_option("--builder-name", default="",
            help=("The name of the builder shown on the waterfall running "
                  "this script e.g. WebKit.")),
        optparse.make_option("--build-name", default="DUMMY_BUILD_NAME",
            help=("The name of the builder used in its path, e.g. "
                  "webkit-rel.")),
        optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER",
            help=("The build number of the builder running this script.")),
        optparse.make_option("--test-results-server", default="",
            help=("If specified, upload results json files to this appengine "
                  "server.")),
    ]))

    option_parser = optparse.OptionParser()

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    return option_parser.parse_args(args)


def _set_up_derived_options(port, options):
    """Sets the option values that depend on other option values."""
    if not options.child_processes:
        options.child_processes = os.environ.get("WEBKIT_TEST_CHILD_PROCESSES",
                                                 str(port.default_child_processes()))
    if not options.max_locked_shards:
        options.max_locked_shards = int(os.environ.get("WEBKIT_TEST_MAX_LOCKED_SHARDS",
                                                       str(port.default_max_locked_shards())))

    if not options.configuration:
        options.configuration = port.default_configuration()

    if options.pixel_tests is None:
        options.pixel_tests = port.default_pixel_tests()

    if not options.time_out_ms:
        options.time_out_ms = str(port.default_timeout_ms())

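    # The slow-test timeout defaults to five times the regular per-test timeout.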
    options.slow_time_out_ms = str(5 * int(options.time_out_ms))

    if options.additional_platform_directory:
        additional_platform_directories = []
        for path in options.additional_platform_directory:
            additional_platform_directories.append(port.host.filesystem.abspath(path))
        options.additional_platform_directory = additional_platform_directories

    if options.new_baseline:
        options.reset_results = True
        options.add_platform_exceptions = True

    if options.pixel_test_directories:
        options.pixel_tests = True
        verified_dirs = set()
        pixel_test_directories = options.pixel_test_directories
        for directory in pixel_test_directories:
            # FIXME: we should support specifying the directories all the ways we support it for additional
            # arguments specifying which tests and directories to run. We should also move the logic for that
            # to Port.
            filesystem = port.host.filesystem
            if not filesystem.isdir(filesystem.join(port.layout_tests_dir(), directory)):
                _log.warning("'%s' was passed to --pixel-test-directories, which doesn't seem to be a directory" % str(directory))
            else:
                verified_dirs.add(directory)

        options.pixel_test_directories = list(verified_dirs)

    if options.run_singly:
        options.verbose = True


def run(port, options, args, logging_stream):
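    """Configures logging, fills in derived option values, and hands the
    actual test run to Manager; the printer is always cleaned up on exit."""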
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG if options.debug_rwt_logging else logging.INFO)

    try:
        printer = printing.Printer(port, options, logging_stream, logger=logger)

        _set_up_derived_options(port, options)
        manager = Manager(port, options, printer)
        printer.print_config(port.results_directory())

        run_details = manager.run(args)
        _log.debug("Testing completed, exit status: %d" % run_details.exit_code)
        return run_details
    finally:
        printer.cleanup()

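# Example invocation (the platform name and test path below are illustrative):
#   python run_webkit_tests.py --platform=test --debug-rwt-logging fast/dom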
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))
    355