Home | History | Annotate | Download | only in valgrind
      1 #!/usr/bin/env python
      2 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
      3 # Use of this source code is governed by a BSD-style license that can be
      4 # found in the LICENSE file.
      5 
      6 ''' Runs various chrome tests through valgrind_test.py.'''
      7 
      8 import glob
      9 import logging
     10 import multiprocessing
     11 import optparse
     12 import os
     13 import stat
     14 import sys
     15 
     16 import logging_utils
     17 import path_utils
     18 
     19 import common
     20 import valgrind_test
     21 
     22 class TestNotFound(Exception): pass
     23 
     24 class MultipleGTestFiltersSpecified(Exception): pass
     25 
     26 class BuildDirNotFound(Exception): pass
     27 
     28 class BuildDirAmbiguous(Exception): pass
     29 
class ChromeTests:
  """Dispatches one named Chromium test suite through valgrind_test.py."""
  # Tools slow enough that the extra ".gtest.txt" exclude files also apply
  # (consumed by _AppendGtestFilter below).
  SLOW_TOOLS = ["memcheck", "tsan", "tsan_rv", "drmemory"]
  # Default layout-test slice size; TestLayoutChunk compares against this to
  # enable its minimum-runtime sanity check.
  LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 300
     33 
     34   def __init__(self, options, args, test):
     35     if ':' in test:
     36       (self._test, self._gtest_filter) = test.split(':', 1)
     37     else:
     38       self._test = test
     39       self._gtest_filter = options.gtest_filter
     40 
     41     if self._test not in self._test_list:
     42       raise TestNotFound("Unknown test: %s" % test)
     43 
     44     if options.gtest_filter and options.gtest_filter != self._gtest_filter:
     45       raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
     46                                           "and --test %s" % test)
     47 
     48     self._options = options
     49     self._args = args
     50 
     51     script_dir = path_utils.ScriptDir()
     52     # Compute the top of the tree (the "source dir") from the script dir (where
     53     # this script lives).  We assume that the script dir is in tools/valgrind/
     54     # relative to the top of the tree.
     55     self._source_dir = os.path.dirname(os.path.dirname(script_dir))
     56     # since this path is used for string matching, make sure it's always
     57     # an absolute Unix-style path
     58     self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
     59     valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
     60     self._command_preamble = ["--source-dir=%s" % (self._source_dir)]
     61 
     62     if not self._options.build_dir:
     63       dirs = [
     64         os.path.join(self._source_dir, "xcodebuild", "Debug"),
     65         os.path.join(self._source_dir, "out", "Debug"),
     66         os.path.join(self._source_dir, "build", "Debug"),
     67       ]
     68       build_dir = [d for d in dirs if os.path.isdir(d)]
     69       if len(build_dir) > 1:
     70         raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
     71                                 "%s\nPlease specify just one "
     72                                 "using --build-dir" % ", ".join(build_dir))
     73       elif build_dir:
     74         self._options.build_dir = build_dir[0]
     75       else:
     76         self._options.build_dir = None
     77 
     78     if self._options.build_dir:
     79       build_dir = os.path.abspath(self._options.build_dir)
     80       self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]
     81 
     82   def _EnsureBuildDirFound(self):
     83     if not self._options.build_dir:
     84       raise BuildDirNotFound("Oops, couldn't find a build dir, please "
     85                              "specify it manually using --build-dir")
     86 
     87   def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
     88     '''Generates the default command array that most tests will use.'''
     89     if exe and common.IsWindows():
     90       exe += '.exe'
     91 
     92     cmd = list(self._command_preamble)
     93 
     94     # Find all suppressions matching the following pattern:
     95     # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
     96     # and list them with --suppressions= prefix.
     97     script_dir = path_utils.ScriptDir()
     98     tool_name = tool.ToolName();
     99     suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
    100     if os.path.exists(suppression_file):
    101       cmd.append("--suppressions=%s" % suppression_file)
    102     # Platform-specific suppression
    103     for platform in common.PlatformNames():
    104       platform_suppression_file = \
    105           os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
    106       if os.path.exists(platform_suppression_file):
    107         cmd.append("--suppressions=%s" % platform_suppression_file)
    108 
    109     if self._options.valgrind_tool_flags:
    110       cmd += self._options.valgrind_tool_flags.split(" ")
    111     if self._options.keep_logs:
    112       cmd += ["--keep_logs"]
    113     if valgrind_test_args != None:
    114       for arg in valgrind_test_args:
    115         cmd.append(arg)
    116     if exe:
    117       self._EnsureBuildDirFound()
    118       cmd.append(os.path.join(self._options.build_dir, exe))
    119       # Valgrind runs tests slowly, so slow tests hurt more; show elapased time
    120       # so we can find the slowpokes.
    121       cmd.append("--gtest_print_time")
    122       # Built-in test launcher for gtest-based executables runs tests using
    123       # multiple process by default. Force the single-process mode back.
    124       cmd.append("--single-process-tests")
    125     if self._options.gtest_repeat:
    126       cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
    127     if self._options.gtest_shuffle:
    128       cmd.append("--gtest_shuffle")
    129     if self._options.brave_new_test_launcher:
    130       cmd.append("--brave-new-test-launcher")
    131     if self._options.test_launcher_bot_mode:
    132       cmd.append("--test-launcher-bot-mode")
    133     return cmd
    134 
    135   def Run(self):
    136     ''' Runs the test specified by command-line argument --test '''
    137     logging.info("running test %s" % (self._test))
    138     return self._test_list[self._test](self)
    139 
    140   def _AppendGtestFilter(self, tool, name, cmd):
    141     '''Append an appropriate --gtest_filter flag to the googletest binary
    142        invocation.
    143        If the user passed his own filter mentioning only one test, just use it.
    144        Othewise, filter out tests listed in the appropriate gtest_exclude files.
    145     '''
    146     if (self._gtest_filter and
    147         ":" not in self._gtest_filter and
    148         "?" not in self._gtest_filter and
    149         "*" not in self._gtest_filter):
    150       cmd.append("--gtest_filter=%s" % self._gtest_filter)
    151       return
    152 
    153     filters = []
    154     gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")
    155 
    156     gtest_filter_files = [
    157         os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
    158     # Use ".gtest.txt" files only for slow tools, as they now contain
    159     # Valgrind- and Dr.Memory-specific filters.
    160     # TODO(glider): rename the files to ".gtest_slow.txt"
    161     if tool.ToolName() in ChromeTests.SLOW_TOOLS:
    162       gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
    163     for platform_suffix in common.PlatformNames():
    164       gtest_filter_files += [
    165         os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
    166         os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
    167             (tool.ToolName(), platform_suffix))]
    168     logging.info("Reading gtest exclude filter files:")
    169     for filename in gtest_filter_files:
    170       # strip the leading absolute path (may be very long on the bot)
    171       # and the following / or \.
    172       readable_filename = filename.replace("\\", "/")  # '\' on Windows
    173       readable_filename = readable_filename.replace(self._source_dir, "")[1:]
    174       if not os.path.exists(filename):
    175         logging.info("  \"%s\" - not found" % readable_filename)
    176         continue
    177       logging.info("  \"%s\" - OK" % readable_filename)
    178       f = open(filename, 'r')
    179       for line in f.readlines():
    180         if line.startswith("#") or line.startswith("//") or line.isspace():
    181           continue
    182         line = line.rstrip()
    183         test_prefixes = ["FLAKY", "FAILS"]
    184         for p in test_prefixes:
    185           # Strip prefixes from the test names.
    186           line = line.replace(".%s_" % p, ".")
    187         # Exclude the original test name.
    188         filters.append(line)
    189         if line[-2:] != ".*":
    190           # List all possible prefixes if line doesn't end with ".*".
    191           for p in test_prefixes:
    192             filters.append(line.replace(".", ".%s_" % p))
    193     # Get rid of duplicates.
    194     filters = set(filters)
    195     gtest_filter = self._gtest_filter
    196     if len(filters):
    197       if gtest_filter:
    198         gtest_filter += ":"
    199         if gtest_filter.find("-") < 0:
    200           gtest_filter += "-"
    201       else:
    202         gtest_filter = "-"
    203       gtest_filter += ":".join(filters)
    204     if gtest_filter:
    205       cmd.append("--gtest_filter=%s" % gtest_filter)
    206 
    207   @staticmethod
    208   def ShowTests():
    209     test_to_names = {}
    210     for name, test_function in ChromeTests._test_list.iteritems():
    211       test_to_names.setdefault(test_function, []).append(name)
    212 
    213     name_to_aliases = {}
    214     for names in test_to_names.itervalues():
    215       names.sort(key=lambda name: len(name))
    216       name_to_aliases[names[0]] = names[1:]
    217 
    218     print
    219     print "Available tests:"
    220     print "----------------"
    221     for name, aliases in sorted(name_to_aliases.iteritems()):
    222       if aliases:
    223         print "   {} (aka {})".format(name, ', '.join(aliases))
    224       else:
    225         print "   {}".format(name)
    226 
    227   def SetupLdPath(self, requires_build_dir):
    228     if requires_build_dir:
    229       self._EnsureBuildDirFound()
    230     elif not self._options.build_dir:
    231       return
    232 
    233     # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
    234     if (os.getenv("LD_LIBRARY_PATH")):
    235       os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
    236                                               self._options.build_dir))
    237     else:
    238       os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
    239 
    240   def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
    241     tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    242     cmd = self._DefaultCommand(tool, name, valgrind_test_args)
    243     self._AppendGtestFilter(tool, name, cmd)
    244     cmd.extend(['--test-tiny-timeout=1000'])
    245     if cmd_args:
    246       cmd.extend(cmd_args)
    247 
    248     self.SetupLdPath(True)
    249     return tool.Run(cmd, module)
    250 
    251   def RunCmdLine(self):
    252     tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    253     cmd = self._DefaultCommand(tool, None, self._args)
    254     self.SetupLdPath(False)
    255     return tool.Run(cmd, None)
    256 
  # ---------------------------------------------------------------------------
  # Simple gtest suite wrappers.  Each maps a --test name onto one call to
  # SimpleTest(module, exe_name): |module| is forwarded to tool.Run() and
  # |exe_name| is the gtest binary inside the build dir.
  # ---------------------------------------------------------------------------

  def TestAccessibility(self):
    return self.SimpleTest("accessibility", "accessibility_unittests")

  def TestAddressInput(self):
    return self.SimpleTest("addressinput", "libaddressinput_unittests")

  def TestAngle(self):
    return self.SimpleTest("angle", "angle_unittests")

  def TestAppList(self):
    return self.SimpleTest("app_list", "app_list_unittests")

  def TestAsh(self):
    return self.SimpleTest("ash", "ash_unittests")

  def TestAshShell(self):
    # NOTE(review): "ash_shelf" looks like a typo for "ash_shell" -- confirm
    # against the tool's module handling before changing.
    return self.SimpleTest("ash_shelf", "ash_shell_unittests")

  def TestAura(self):
    return self.SimpleTest("aura", "aura_unittests")

  def TestBase(self):
    return self.SimpleTest("base", "base_unittests")

  def TestBlinkHeap(self):
    return self.SimpleTest("blink_heap", "blink_heap_unittests")

  def TestBlinkPlatform(self):
    return self.SimpleTest("blink_platform", "blink_platform_unittests")

  def TestCacheInvalidation(self):
    return self.SimpleTest("cacheinvalidation", "cacheinvalidation_unittests")

  def TestCast(self):
    return self.SimpleTest("chrome", "cast_unittests")

  def TestCC(self):
    return self.SimpleTest("cc", "cc_unittests")

  def TestChromeApp(self):
    return self.SimpleTest("chrome_app", "chrome_app_unittests")

  def TestChromeElf(self):
    return self.SimpleTest("chrome_elf", "chrome_elf_unittests")

  def TestChromeDriver(self):
    return self.SimpleTest("chromedriver", "chromedriver_unittests")

  def TestChromeOS(self):
    return self.SimpleTest("chromeos", "chromeos_unittests")

  def TestCloudPrint(self):
    return self.SimpleTest("cloud_print", "cloud_print_unittests")

  def TestComponents(self):
    return self.SimpleTest("components", "components_unittests")

  def TestCompositor(self):
    return self.SimpleTest("compositor", "compositor_unittests")

  def TestContent(self):
    return self.SimpleTest("content", "content_unittests")

  def TestCourgette(self):
    return self.SimpleTest("courgette", "courgette_unittests")

  def TestCrypto(self):
    return self.SimpleTest("crypto", "crypto_unittests")

  def TestDevice(self):
    return self.SimpleTest("device", "device_unittests")

  def TestDisplay(self):
    return self.SimpleTest("display", "display_unittests")

  def TestEvents(self):
    return self.SimpleTest("events", "events_unittests")

  def TestExtensions(self):
    return self.SimpleTest("extensions", "extensions_unittests")

  def TestFFmpeg(self):
    return self.SimpleTest("chrome", "ffmpeg_unittests")

  def TestFFmpegRegressions(self):
    return self.SimpleTest("chrome", "ffmpeg_regression_tests")

  def TestGCM(self):
    return self.SimpleTest("gcm", "gcm_unit_tests")

  def TestGfx(self):
    return self.SimpleTest("gfx", "gfx_unittests")

  def TestGin(self):
    return self.SimpleTest("gin", "gin_unittests")

  def TestGoogleApis(self):
    return self.SimpleTest("google_apis", "google_apis_unittests")

  def TestGPU(self):
    return self.SimpleTest("gpu", "gpu_unittests")

  def TestIpc(self):
    # ipc_tests gets --trace_children, presumably so the tool follows spawned
    # subprocesses -- handled by valgrind_test.py.
    return self.SimpleTest("ipc", "ipc_tests",
                           valgrind_test_args=["--trace_children"])

  def TestInstallerUtil(self):
    return self.SimpleTest("installer_util", "installer_util_unittests")

  def TestJingle(self):
    return self.SimpleTest("chrome", "jingle_unittests")

  def TestKeyboard(self):
    return self.SimpleTest("keyboard", "keyboard_unittests")

  def TestMedia(self):
    return self.SimpleTest("chrome", "media_unittests")

  def TestMessageCenter(self):
    return self.SimpleTest("message_center", "message_center_unittests")

  def TestMojoAppsJS(self):
    return self.SimpleTest("mojo_apps_js", "mojo_apps_js_unittests")

  def TestMojoCommon(self):
    return self.SimpleTest("mojo_common", "mojo_common_unittests")

  def TestMojoJS(self):
    return self.SimpleTest("mojo_js", "mojo_js_unittests")

  def TestMojoPublicBindings(self):
    return self.SimpleTest("mojo_public_bindings",
                           "mojo_public_bindings_unittests")

  def TestMojoPublicEnv(self):
    return self.SimpleTest("mojo_public_env",
                           "mojo_public_environment_unittests")

  def TestMojoPublicSystem(self):
    return self.SimpleTest("mojo_public_system",
                           "mojo_public_system_unittests")

  def TestMojoPublicSysPerf(self):
    return self.SimpleTest("mojo_public_sysperf",
                           "mojo_public_system_perftests")

  def TestMojoPublicUtility(self):
    return self.SimpleTest("mojo_public_utility",
                           "mojo_public_utility_unittests")

  def TestMojoApplicationManager(self):
    return self.SimpleTest("mojo_application_manager",
                           "mojo_application_manager_unittests")

  def TestMojoSystem(self):
    return self.SimpleTest("mojo_system", "mojo_system_unittests")

  def TestMojoViewManager(self):
    return self.SimpleTest("mojo_view_manager", "mojo_view_manager_unittests")

  def TestNet(self):
    return self.SimpleTest("net", "net_unittests")

  def TestNetPerf(self):
    return self.SimpleTest("net", "net_perftests")

  def TestPhoneNumber(self):
    return self.SimpleTest("phonenumber", "libphonenumber_unittests")

  def TestPPAPI(self):
    return self.SimpleTest("chrome", "ppapi_unittests")

  def TestPrinting(self):
    return self.SimpleTest("chrome", "printing_unittests")

  def TestRemoting(self):
    return self.SimpleTest("chrome", "remoting_unittests",
                           cmd_args=[
                               "--ui-test-action-timeout=60000",
                               "--ui-test-action-max-timeout=150000"])

  def TestSql(self):
    return self.SimpleTest("chrome", "sql_unittests")

  def TestSync(self):
    return self.SimpleTest("chrome", "sync_unit_tests")

  def TestLinuxSandbox(self):
    return self.SimpleTest("sandbox", "sandbox_linux_unittests")
    446 
    447   def TestUnit(self):
    448     # http://crbug.com/51716
    449     # Disabling all unit tests
    450     # Problems reappeared after r119922
    451     if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
    452       logging.warning("unit_tests are disabled for memcheck on MacOS.")
    453       return 0;
    454     return self.SimpleTest("chrome", "unit_tests")
    455 
  # Further thin SimpleTest() wrappers.

  def TestUIUnit(self):
    return self.SimpleTest("chrome", "ui_unittests")

  def TestURL(self):
    return self.SimpleTest("chrome", "url_unittests")

  def TestViews(self):
    return self.SimpleTest("views", "views_unittests")
    464 
    465 
  # Shared argument sets for the long-running UI / browser suites below.
  # Valgrind timeouts are in seconds.
  UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
  # UI test timeouts are in milliseconds.
  UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
                  "--ui-test-action-max-timeout=150000",
                  "--no-sandbox"]

  # TODO(thestig) fine-tune these values.
  # Valgrind timeouts are in seconds.
  BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
  # Browser test timeouts are in milliseconds.
  BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000",
                       "--ui-test-action-max-timeout=800000",
                       "--no-sandbox"]
    480 
  # Long-running suites: pass the shared valgrind/test timeout argument sets.

  def TestBrowser(self):
    return self.SimpleTest("chrome", "browser_tests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestContentBrowser(self):
    return self.SimpleTest("content", "content_browsertests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestInteractiveUI(self):
    return self.SimpleTest("chrome", "interactive_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestSafeBrowsing(self):
    return self.SimpleTest("chrome", "safe_browsing_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))

  def TestSyncIntegration(self):
    return self.SimpleTest("chrome", "sync_integration_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))
    505 
  def TestLayoutChunk(self, chunk_num, chunk_size):
    """Runs one slice of the layout tests under the valgrind tool.

    Runs tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
    list of tests.  Wrap around to beginning of list at end.
    If chunk_size is zero, run all tests in the list once.
    If a text file is given as argument, it is used as the list of tests.

    Returns the tool's exit status (tool.Run result).
    """
    # Exactly one of "chunked mode" and "explicit args" must be in effect.
    assert((chunk_size == 0) != (len(self._args) == 0))
    # Build the ginormous commandline in 'cmd'.
    # It's going to be roughly
    #  python valgrind_test.py ... python run_webkit_tests.py ...
    # but we'll use the --indirect flag to valgrind_test.py
    # to avoid valgrinding python.
    # Start by building the valgrind_test.py commandline.
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool)
    cmd.append("--trace_children")
    cmd.append("--indirect_webkit_layout")
    cmd.append("--ignore_exit_code")
    # Now build script_cmd, the run_webkits_tests.py commandline
    # Store each chunk in its own directory so that we can find the data later
    chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
    out_dir = os.path.join(path_utils.ScriptDir(), "latest")
    out_dir = os.path.join(out_dir, chunk_dir)
    # Reuse the chunk dir but drop stale .txt logs from a previous run.
    if os.path.exists(out_dir):
      old_files = glob.glob(os.path.join(out_dir, "*.txt"))
      for f in old_files:
        os.remove(f)
    else:
      os.makedirs(out_dir)
    script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
                          "run_webkit_tests.py")
    # http://crbug.com/260627: After the switch to content_shell from DRT, each
    # test now brings up 3 processes.  Under Valgrind, they become memory bound
    # and can eventually OOM if we don't reduce the total count.
    # It'd be nice if content_shell automatically throttled the startup of new
    # tests if we're low on memory.
    jobs = max(1, int(multiprocessing.cpu_count() * 0.3))
    script_cmd = ["python", script, "-v",
                  # run a separate DumpRenderTree for each test
                  "--batch-size=1",
                  "--fully-parallel",
                  "--child-processes=%d" % jobs,
                  "--time-out-ms=800000",
                  "--no-retry-failures",  # retrying takes too much time
                  # http://crbug.com/176908: Don't launch a browser when done.
                  "--no-show-results",
                  "--nocheck-sys-deps"]
    # Pass build mode to run_webkit_tests.py.  We aren't passed it directly,
    # so parse it out of build_dir.  run_webkit_tests.py can only handle
    # the two values "Release" and "Debug".
    # TODO(Hercules): unify how all our scripts pass around build mode
    # (--mode / --target / --build-dir / --debug)
    if self._options.build_dir:
      build_root, mode = os.path.split(self._options.build_dir)
      script_cmd.extend(["--build-directory", build_root, "--target", mode])
    if (chunk_size > 0):
      script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
    if len(self._args):
      # if the arg is a txt file, then treat it as a list of tests
      if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
        script_cmd.append("--test-list=%s" % self._args[0])
      else:
        script_cmd.extend(self._args)
    self._AppendGtestFilter(tool, "layout", script_cmd)
    # Now run script_cmd with the wrapper in cmd
    cmd.extend(["--"])
    cmd.extend(script_cmd)

    # Layout tests often times fail quickly, but the buildbot remains green.
    # Detect this situation when running with the default chunk size.
    if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
      min_runtime_in_seconds=120
    else:
      min_runtime_in_seconds=0
    ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
    return ret
    581 
    582 
    583   def TestLayout(self):
    584     # A "chunk file" is maintained in the local directory so that each test
    585     # runs a slice of the layout tests of size chunk_size that increments with
    586     # each run.  Since tests can be added and removed from the layout tests at
    587     # any time, this is not going to give exact coverage, but it will allow us
    588     # to continuously run small slices of the layout tests under valgrind rather
    589     # than having to run all of them in one shot.
    590     chunk_size = self._options.num_tests
    591     if chunk_size == 0 or len(self._args):
    592       return self.TestLayoutChunk(0, 0)
    593     chunk_num = 0
    594     chunk_file = os.path.join("valgrind_layout_chunk.txt")
    595     logging.info("Reading state from " + chunk_file)
    596     try:
    597       f = open(chunk_file)
    598       if f:
    599         chunk_str = f.read()
    600         if len(chunk_str):
    601           chunk_num = int(chunk_str)
    602         # This should be enough so that we have a couple of complete runs
    603         # of test data stored in the archive (although note that when we loop
    604         # that we almost guaranteed won't be at the end of the test list)
    605         if chunk_num > 10000:
    606           chunk_num = 0
    607         f.close()
    608     except IOError, (errno, strerror):
    609       logging.error("error reading from file %s (%d, %s)" % (chunk_file,
    610                     errno, strerror))
    611     # Save the new chunk size before running the tests. Otherwise if a
    612     # particular chunk hangs the bot, the chunk number will never get
    613     # incremented and the bot will be wedged.
    614     logging.info("Saving state to " + chunk_file)
    615     try:
    616       f = open(chunk_file, "w")
    617       chunk_num += 1
    618       f.write("%d" % chunk_num)
    619       f.close()
    620     except IOError, (errno, strerror):
    621       logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
    622                     strerror))
    623     # Since we're running small chunks of the layout tests, it's important to
    624     # mark the ones that have errors in them.  These won't be visible in the
    625     # summary list for long, but will be useful for someone reviewing this bot.
    626     return self.TestLayoutChunk(chunk_num, chunk_size)
    627 
  # The known list of tests.
  # Recognise the original abbreviations as well as full executable names.
  # Run() resolves the --test name here and invokes the handler as
  # _test_list[name](self).
  _test_list = {
    "cmdline" : RunCmdLine,
    "addressinput": TestAddressInput,
    "libaddressinput_unittests": TestAddressInput,
    "accessibility": TestAccessibility,
    "angle": TestAngle,          "angle_unittests": TestAngle,
    "app_list": TestAppList,     "app_list_unittests": TestAppList,
    "ash": TestAsh,              "ash_unittests": TestAsh,
    "ash_shell": TestAshShell,   "ash_shell_unittests": TestAshShell,
    "aura": TestAura,            "aura_unittests": TestAura,
    "base": TestBase,            "base_unittests": TestBase,
    "blink_heap": TestBlinkHeap,
    "blink_platform": TestBlinkPlatform,
    "browser": TestBrowser,      "browser_tests": TestBrowser,
    "cacheinvalidation": TestCacheInvalidation,
    "cacheinvalidation_unittests": TestCacheInvalidation,
    "cast": TestCast,            "cast_unittests": TestCast,
    "cc": TestCC,                "cc_unittests": TestCC,
    "chrome_app": TestChromeApp,
    "chrome_elf": TestChromeElf,
    "chromedriver": TestChromeDriver,
    "chromeos": TestChromeOS,    "chromeos_unittests": TestChromeOS,
    "cloud_print": TestCloudPrint,
    "cloud_print_unittests": TestCloudPrint,
    "components": TestComponents,"components_unittests": TestComponents,
    "compositor": TestCompositor,"compositor_unittests": TestCompositor,
    "content": TestContent,      "content_unittests": TestContent,
    "content_browsertests": TestContentBrowser,
    "courgette": TestCourgette,  "courgette_unittests": TestCourgette,
    "crypto": TestCrypto,        "crypto_unittests": TestCrypto,
    "device": TestDevice,        "device_unittests": TestDevice,
    "display": TestDisplay,      "display_unittests": TestDisplay,
    "events": TestEvents,        "events_unittests": TestEvents,
    "extensions": TestExtensions, "extensions_unittests": TestExtensions,
    "ffmpeg": TestFFmpeg,        "ffmpeg_unittests": TestFFmpeg,
    "ffmpeg_regression_tests": TestFFmpegRegressions,
    "gcm": TestGCM,              "gcm_unit_tests": TestGCM,
    "gin": TestGin,              "gin_unittests": TestGin,
    "gfx": TestGfx,              "gfx_unittests": TestGfx,
    "google_apis": TestGoogleApis,
    "gpu": TestGPU,              "gpu_unittests": TestGPU,
    "ipc": TestIpc,              "ipc_tests": TestIpc,
    "installer_util": TestInstallerUtil,
    "interactive_ui": TestInteractiveUI,
    "jingle": TestJingle,        "jingle_unittests": TestJingle,
    "keyboard": TestKeyboard,    "keyboard_unittests": TestKeyboard,
    "layout": TestLayout,        "layout_tests": TestLayout,
    "media": TestMedia,          "media_unittests": TestMedia,
    "message_center": TestMessageCenter,
    "message_center_unittests" : TestMessageCenter,
    "mojo_apps_js": TestMojoAppsJS,
    "mojo_common": TestMojoCommon,
    "mojo_js": TestMojoJS,
    "mojo_system": TestMojoSystem,
    "mojo_public_system": TestMojoPublicSystem,
    "mojo_public_utility": TestMojoPublicUtility,
    "mojo_public_bindings": TestMojoPublicBindings,
    "mojo_public_env": TestMojoPublicEnv,
    "mojo_public_sysperf": TestMojoPublicSysPerf,
    "mojo_application_manager": TestMojoApplicationManager,
    "mojo_view_manager": TestMojoViewManager,
    "net": TestNet,              "net_unittests": TestNet,
    "net_perf": TestNetPerf,     "net_perftests": TestNetPerf,
    "phonenumber": TestPhoneNumber,
    "libphonenumber_unittests": TestPhoneNumber,
    "ppapi": TestPPAPI,          "ppapi_unittests": TestPPAPI,
    "printing": TestPrinting,    "printing_unittests": TestPrinting,
    "remoting": TestRemoting,    "remoting_unittests": TestRemoting,
    "safe_browsing": TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing,
    "sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
    "sql": TestSql,              "sql_unittests": TestSql,
    "sync": TestSync,            "sync_unit_tests": TestSync,
    "sync_integration_tests": TestSyncIntegration,
    "sync_integration": TestSyncIntegration,
    "ui_unit": TestUIUnit,       "ui_unittests": TestUIUnit,
    "unit": TestUnit,            "unit_tests": TestUnit,
    "url": TestURL,              "url_unittests": TestURL,
    "views": TestViews,          "views_unittests": TestViews,
    "webkit": TestLayout,
  }
    710 
    711 
    712 def _main():
    713   parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
    714                                  "[-t <test> ...]")
    715 
    716   parser.add_option("--help-tests", dest="help_tests", action="store_true",
    717                     default=False, help="List all available tests")
    718   parser.add_option("-b", "--build-dir",
    719                     help="the location of the compiler output")
    720   parser.add_option("--target", help="Debug or Release")
    721   parser.add_option("-t", "--test", action="append", default=[],
    722                     help="which test to run, supports test:gtest_filter format "
    723                          "as well.")
    724   parser.add_option("--baseline", action="store_true", default=False,
    725                     help="generate baseline data instead of validating")
    726   parser.add_option("--gtest_filter",
    727                     help="additional arguments to --gtest_filter")
    728   parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
    729   parser.add_option("--gtest_shuffle", action="store_true", default=False,
    730                     help="Randomize tests' orders on every iteration.")
    731   parser.add_option("-v", "--verbose", action="store_true", default=False,
    732                     help="verbose output - enable debug log messages")
    733   parser.add_option("--tool", dest="valgrind_tool", default="memcheck",
    734                     help="specify a valgrind tool to run the tests under")
    735   parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
    736                     help="specify custom flags for the selected valgrind tool")
    737   parser.add_option("--keep_logs", action="store_true", default=False,
    738                     help="store memory tool logs in the <tool>.logs directory "
    739                          "instead of /tmp.\nThis can be useful for tool "
    740                          "developers/maintainers.\nPlease note that the <tool>"
    741                          ".logs directory will be clobbered on tool startup.")
    742   parser.add_option("-n", "--num_tests", type="int",
    743                     default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
    744                     help="for layout tests: # of subtests per run.  0 for all.")
    745   # TODO(thestig) Remove this if we can.
    746   parser.add_option("--gtest_color", dest="gtest_color", default="no",
    747                     help="dummy compatibility flag for sharding_supervisor.")
    748   parser.add_option("--brave-new-test-launcher", action="store_true",
    749                     help="run the tests with --brave-new-test-launcher")
    750   parser.add_option("--test-launcher-bot-mode", action="store_true",
    751                     help="run the tests with --test-launcher-bot-mode")
    752 
    753   options, args = parser.parse_args()
    754 
    755   # Bake target into build_dir.
    756   if options.target and options.build_dir:
    757     assert (options.target !=
    758             os.path.basename(os.path.dirname(options.build_dir)))
    759     options.build_dir = os.path.join(os.path.abspath(options.build_dir),
    760                                      options.target)
    761 
    762   if options.verbose:
    763     logging_utils.config_root(logging.DEBUG)
    764   else:
    765     logging_utils.config_root()
    766 
    767   if options.help_tests:
    768     ChromeTests.ShowTests()
    769     return 0
    770 
    771   if not options.test:
    772     parser.error("--test not specified")
    773 
    774   if len(options.test) != 1 and options.gtest_filter:
    775     parser.error("--gtest_filter and multiple tests don't make sense together")
    776 
    777   for t in options.test:
    778     tests = ChromeTests(options, args, t)
    779     ret = tests.Run()
    780     if ret: return ret
    781   return 0
    782 
    783 
    784 if __name__ == "__main__":
    785   sys.exit(_main())
    786