#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

''' Runs various chrome tests through valgrind_test.py.'''
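
# Example invocations (illustrative sketches; "chrome_tests.py" is this
# script's assumed filename, the recognized test names come from
# ChromeTests._test_list below, and the tool names from valgrind_test.py):
#   tools/valgrind/chrome_tests.py -t base --tool memcheck
#   tools/valgrind/chrome_tests.py -b out/Debug -t net_unittests:NetUtilTest.*
# (NetUtilTest.* is a hypothetical gtest filter, passed via the
# test:gtest_filter syntax handled in ChromeTests.__init__.)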

import glob
import logging
import multiprocessing
import optparse
import os
import stat
import sys

import logging_utils
import path_utils

import common
import valgrind_test

class TestNotFound(Exception): pass

class MultipleGTestFiltersSpecified(Exception): pass

class BuildDirNotFound(Exception): pass

class BuildDirAmbiguous(Exception): pass

class ChromeTests:
  SLOW_TOOLS = ["memcheck", "tsan", "tsan_rv", "drmemory"]
  LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 500

  def __init__(self, options, args, test):
    if ':' in test:
      (self._test, self._gtest_filter) = test.split(':', 1)
    else:
      self._test = test
      self._gtest_filter = options.gtest_filter

    if self._test not in self._test_list:
      raise TestNotFound("Unknown test: %s" % test)

    if options.gtest_filter and options.gtest_filter != self._gtest_filter:
      raise MultipleGTestFiltersSpecified("Cannot specify both --gtest_filter "
                                          "and --test %s" % test)

    self._options = options
    self._args = args

    script_dir = path_utils.ScriptDir()
    # Compute the top of the tree (the "source dir") from the script dir (where
    # this script lives).  We assume that the script dir is in tools/valgrind/
    # relative to the top of the tree.
    self._source_dir = os.path.dirname(os.path.dirname(script_dir))
    # Since this path is used for string matching, make sure it's always
    # an absolute Unix-style path.
    self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
    valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
    self._command_preamble = ["--source-dir=%s" % (self._source_dir)]

    if not self._options.build_dir:
      dirs = [
        os.path.join(self._source_dir, "xcodebuild", "Debug"),
        os.path.join(self._source_dir, "out", "Debug"),
        os.path.join(self._source_dir, "build", "Debug"),
      ]
      build_dir = [d for d in dirs if os.path.isdir(d)]
      if len(build_dir) > 1:
        raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
                                "%s\nPlease specify just one "
                                "using --build-dir" % ", ".join(build_dir))
      elif build_dir:
        self._options.build_dir = build_dir[0]
      else:
        self._options.build_dir = None

    if self._options.build_dir:
      build_dir = os.path.abspath(self._options.build_dir)
      self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]

  def _EnsureBuildDirFound(self):
    if not self._options.build_dir:
      raise BuildDirNotFound("Oops, couldn't find a build dir, please "
                             "specify it manually using --build-dir")

  def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
    '''Generates the default command array that most tests will use.'''
    if exe and common.IsWindows():
      exe += '.exe'

    cmd = list(self._command_preamble)

    # Find all suppressions matching the following pattern:
    # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
    # and list them with --suppressions= prefix.
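    # For example, for the "memcheck" tool this might append (assuming both
    # files exist; the platform suffix comes from common.PlatformNames()):
    #   --suppressions=tools/valgrind/memcheck/suppressions.txt
    #   --suppressions=tools/valgrind/memcheck/suppressions_linux.txt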
    script_dir = path_utils.ScriptDir()
    tool_name = tool.ToolName()
    suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
    if os.path.exists(suppression_file):
      cmd.append("--suppressions=%s" % suppression_file)
    # Platform-specific suppressions.
    for platform in common.PlatformNames():
      platform_suppression_file = \
          os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
      if os.path.exists(platform_suppression_file):
        cmd.append("--suppressions=%s" % platform_suppression_file)

    if self._options.valgrind_tool_flags:
      cmd += self._options.valgrind_tool_flags.split(" ")
    if self._options.keep_logs:
      cmd += ["--keep_logs"]
    if valgrind_test_args is not None:
      for arg in valgrind_test_args:
        cmd.append(arg)
    if exe:
      self._EnsureBuildDirFound()
      cmd.append(os.path.join(self._options.build_dir, exe))
      # Valgrind runs tests slowly, so slow tests hurt more; show elapsed time
      # so we can find the slowpokes.
      cmd.append("--gtest_print_time")
      # The built-in test launcher for gtest-based executables runs tests in
      # multiple processes by default. Force single-process mode back on.
      cmd.append("--single-process-tests")
    if self._options.gtest_repeat:
      cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
    if self._options.gtest_shuffle:
      cmd.append("--gtest_shuffle")
    if self._options.brave_new_test_launcher:
      cmd.append("--brave-new-test-launcher")
    if self._options.test_launcher_bot_mode:
      cmd.append("--test-launcher-bot-mode")
    return cmd

  def Run(self):
    ''' Runs the test specified by command-line argument --test '''
    logging.info("running test %s" % (self._test))
    return self._test_list[self._test](self)

  def _AppendGtestFilter(self, tool, name, cmd):
    '''Appends an appropriate --gtest_filter flag to the googletest binary
       invocation.
       If the user passed a filter that names just one test, use it as-is.
       Otherwise, filter out tests listed in the appropriate gtest_exclude
       files.
    '''
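    # For example, an exclude-file line "FooTest.Bar" (a hypothetical test
    # name) contributes the filters FooTest.Bar, FooTest.FLAKY_Bar and
    # FooTest.FAILS_Bar, which end up joined into something like:
    #   --gtest_filter=-FooTest.Bar:FooTest.FLAKY_Bar:FooTest.FAILS_Bar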
    if (self._gtest_filter and
        ":" not in self._gtest_filter and
        "?" not in self._gtest_filter and
        "*" not in self._gtest_filter):
      cmd.append("--gtest_filter=%s" % self._gtest_filter)
      return

    filters = []
    gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")

    gtest_filter_files = [
        os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
    # Use ".gtest.txt" files only for slow tools, as they now contain
    # Valgrind- and Dr.Memory-specific filters.
    # TODO(glider): rename the files to ".gtest_slow.txt"
    if tool.ToolName() in ChromeTests.SLOW_TOOLS:
      gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
    for platform_suffix in common.PlatformNames():
      gtest_filter_files += [
        os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
        os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
            (tool.ToolName(), platform_suffix))]
    logging.info("Reading gtest exclude filter files:")
    for filename in gtest_filter_files:
      # Strip the leading absolute path (may be very long on the bot)
      # and the following / or \.
      readable_filename = filename.replace("\\", "/")  # '\' on Windows
      readable_filename = readable_filename.replace(self._source_dir, "")[1:]
      if not os.path.exists(filename):
        logging.info("  \"%s\" - not found" % readable_filename)
        continue
      logging.info("  \"%s\" - OK" % readable_filename)
      with open(filename, 'r') as f:
        for line in f:
          if line.startswith("#") or line.startswith("//") or line.isspace():
            continue
          line = line.rstrip()
          test_prefixes = ["FLAKY", "FAILS"]
          for p in test_prefixes:
            # Strip prefixes from the test names.
            line = line.replace(".%s_" % p, ".")
          # Exclude the original test name.
          filters.append(line)
          if line[-2:] != ".*":
            # List all possible prefixes if line doesn't end with ".*".
            for p in test_prefixes:
              filters.append(line.replace(".", ".%s_" % p))
    # Get rid of duplicates.
    filters = set(filters)
    gtest_filter = self._gtest_filter
    if filters:
      if gtest_filter:
        gtest_filter += ":"
        if gtest_filter.find("-") < 0:
          gtest_filter += "-"
      else:
        gtest_filter = "-"
      gtest_filter += ":".join(filters)
    if gtest_filter:
      cmd.append("--gtest_filter=%s" % gtest_filter)

  @staticmethod
  def ShowTests():
    test_to_names = {}
    for name, test_function in ChromeTests._test_list.iteritems():
      test_to_names.setdefault(test_function, []).append(name)

    name_to_aliases = {}
    for names in test_to_names.itervalues():
      names.sort(key=len)
      name_to_aliases[names[0]] = names[1:]

    print
    print "Available tests:"
    print "----------------"
    for name, aliases in sorted(name_to_aliases.iteritems()):
      if aliases:
        print "   {} (aka {})".format(name, ', '.join(aliases))
      else:
        print "   {}".format(name)

  def SetupLdPath(self, requires_build_dir):
    if requires_build_dir:
      self._EnsureBuildDirFound()
    elif not self._options.build_dir:
      return

    # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
    if os.getenv("LD_LIBRARY_PATH"):
      os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
                                              self._options.build_dir))
    else:
      os.putenv("LD_LIBRARY_PATH", self._options.build_dir)

  def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, name, valgrind_test_args)
    self._AppendGtestFilter(tool, name, cmd)
    cmd.extend(['--test-tiny-timeout=1000'])
    if cmd_args:
      cmd.extend(cmd_args)

    self.SetupLdPath(True)
    return tool.Run(cmd, module)

  def RunCmdLine(self):
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool, None, self._args)
    self.SetupLdPath(False)
    return tool.Run(cmd, None)

  def TestAppList(self):
    return self.SimpleTest("app_list", "app_list_unittests")

  def TestAsh(self):
    return self.SimpleTest("ash", "ash_unittests")

  def TestAura(self):
    return self.SimpleTest("aura", "aura_unittests")

  def TestBase(self):
    return self.SimpleTest("base", "base_unittests")

  def TestChromeOS(self):
    return self.SimpleTest("chromeos", "chromeos_unittests")

  def TestComponents(self):
    return self.SimpleTest("components", "components_unittests")

  def TestCompositor(self):
    return self.SimpleTest("compositor", "compositor_unittests")

  def TestContent(self):
    return self.SimpleTest("content", "content_unittests")

  def TestContentBrowser(self):
    return self.SimpleTest("content", "content_browsertests")

  def TestCourgette(self):
    return self.SimpleTest("courgette", "courgette_unittests")

  def TestCrypto(self):
    return self.SimpleTest("crypto", "crypto_unittests")

  def TestDevice(self):
    return self.SimpleTest("device", "device_unittests")

  def TestEvents(self):
    return self.SimpleTest("events", "events_unittests")

  def TestFFmpeg(self):
    return self.SimpleTest("chrome", "ffmpeg_unittests")

  def TestFFmpegRegressions(self):
    return self.SimpleTest("chrome", "ffmpeg_regression_tests")

  def TestGPU(self):
    return self.SimpleTest("gpu", "gpu_unittests")

  def TestIpc(self):
    return self.SimpleTest("ipc", "ipc_tests",
                           valgrind_test_args=["--trace_children"])

  def TestJingle(self):
    return self.SimpleTest("chrome", "jingle_unittests")

  def TestMedia(self):
    return self.SimpleTest("chrome", "media_unittests")

  def TestMessageCenter(self):
    return self.SimpleTest("message_center", "message_center_unittests")

  def TestNet(self):
    return self.SimpleTest("net", "net_unittests")

  def TestNetPerf(self):
    return self.SimpleTest("net", "net_perftests")

  def TestPPAPI(self):
    return self.SimpleTest("chrome", "ppapi_unittests")

  def TestPrinting(self):
    return self.SimpleTest("chrome", "printing_unittests")

  def TestRemoting(self):
    return self.SimpleTest("chrome", "remoting_unittests",
                           cmd_args=[
                               "--ui-test-action-timeout=60000",
                               "--ui-test-action-max-timeout=150000"])

  def TestSql(self):
    return self.SimpleTest("chrome", "sql_unittests")

  def TestSync(self):
    return self.SimpleTest("chrome", "sync_unit_tests")

  def TestLinuxSandbox(self):
    return self.SimpleTest("sandbox", "sandbox_linux_unittests")

  def TestUnit(self):
    # http://crbug.com/51716
    # Disabling all unit tests
    # Problems reappeared after r119922
    if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
      logging.warning("unit_tests are disabled for memcheck on MacOS.")
      return 0
    return self.SimpleTest("chrome", "unit_tests")

  def TestUIUnit(self):
    return self.SimpleTest("chrome", "ui_unittests")

  def TestURL(self):
    return self.SimpleTest("chrome", "url_unittests")

  def TestViews(self):
    return self.SimpleTest("views", "views_unittests")

  # Valgrind timeouts are in seconds.
  UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
  # UI test timeouts are in milliseconds.
  UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
                  "--ui-test-action-max-timeout=150000",
                  "--no-sandbox"]

  # TODO(thestig) fine-tune these values.
  # Valgrind timeouts are in seconds.
  BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
  # Browser test timeouts are in milliseconds.
  BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000",
                       "--ui-test-action-max-timeout=800000",
                       "--no-sandbox"]

  def TestAutomatedUI(self):
    return self.SimpleTest("chrome", "automated_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestBrowser(self):
    return self.SimpleTest("chrome", "browser_tests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestInteractiveUI(self):
    return self.SimpleTest("chrome", "interactive_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestReliability(self):
    script_dir = path_utils.ScriptDir()
    url_list_file = os.path.join(script_dir, "reliability", "url_list.txt")
    return self.SimpleTest("chrome", "reliability_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(self.UI_TEST_ARGS +
                                     ["--list=%s" % url_list_file]))

  def TestSafeBrowsing(self):
    return self.SimpleTest("chrome", "safe_browsing_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=["--ui-test-action-max-timeout=450000"])

  def TestSyncIntegration(self):
    return self.SimpleTest("chrome", "sync_integration_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=["--ui-test-action-max-timeout=450000"])

  def TestLayoutChunk(self, chunk_num, chunk_size):
    # Run tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
    # list of tests.  Wrap around to beginning of list at end.
    # If chunk_size is zero, run all tests in the list once.
    # If a text file is given as argument, it is used as the list of tests.
    #
    # Build the ginormous commandline in 'cmd'.
    # It's going to be roughly
    #  python valgrind_test.py ... python run_webkit_tests.py ...
    # but we'll use the --indirect flag to valgrind_test.py
    # to avoid valgrinding python.
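    # A rough sketch of the final command (paths and flags are illustrative;
    # the actual pieces are assembled below):
    #   python valgrind_test.py --source-dir=... --trace_children \
    #     --indirect_webkit_layout -- python run_webkit_tests.py -v ...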
    # Start by building the valgrind_test.py commandline.
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool)
    cmd.append("--trace_children")
    cmd.append("--indirect_webkit_layout")
    cmd.append("--ignore_exit_code")
    # Now build script_cmd, the run_webkit_tests.py commandline.
    # Store each chunk in its own directory so that we can find the data later.
    chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
    out_dir = os.path.join(path_utils.ScriptDir(), "latest")
    out_dir = os.path.join(out_dir, chunk_dir)
    if os.path.exists(out_dir):
      old_files = glob.glob(os.path.join(out_dir, "*.txt"))
      for f in old_files:
        os.remove(f)
    else:
      os.makedirs(out_dir)
    script = os.path.join(self._source_dir, "webkit", "tools", "layout_tests",
                          "run_webkit_tests.py")
    # http://crbug.com/260627: After the switch to content_shell from DRT, each
    # test now brings up 3 processes.  Under Valgrind, they become memory bound
    # and can eventually OOM if we don't reduce the total count.
    jobs = max(1, int(multiprocessing.cpu_count() * 0.4))
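    # e.g. on an 8-core bot this gives max(1, int(3.2)) == 3 child processes.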
    script_cmd = ["python", script, "-v",
                  "--run-singly",  # run a separate DumpRenderTree for each test
                  "--fully-parallel",
                  "--child-processes=%d" % jobs,
                  "--time-out-ms=200000",
                  "--no-retry-failures",  # retrying takes too much time
                  # http://crbug.com/176908: Don't launch a browser when done.
                  "--no-show-results",
                  "--nocheck-sys-deps"]
    # Pass build mode to run_webkit_tests.py.  We aren't passed it directly,
    # so parse it out of build_dir.  run_webkit_tests.py can only handle
    # the two values "Release" and "Debug".
    # TODO(Hercules): unify how all our scripts pass around build mode
    # (--mode / --target / --build-dir / --debug)
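    # For example, a build_dir of "out/Debug" (an illustrative path) splits
    # into build_root "out" and mode "Debug".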
    if self._options.build_dir:
      build_root, mode = os.path.split(self._options.build_dir)
      script_cmd.extend(["--build-directory", build_root, "--target", mode])
    if chunk_size > 0:
      script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
    if self._args:
      # If the arg is a .txt file, treat it as a list of tests.
      if os.path.isfile(self._args[0]) and self._args[0].endswith(".txt"):
        script_cmd.append("--test-list=%s" % self._args[0])
      else:
        script_cmd.extend(self._args)
    self._AppendGtestFilter(tool, "layout", script_cmd)
    # Now run script_cmd with the wrapper in cmd.
    cmd.extend(["--"])
    cmd.extend(script_cmd)

    # Layout tests often fail quickly, but the buildbot remains green.
    # Detect this situation when running with the default chunk size.
    if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
      min_runtime_in_seconds = 120
    else:
      min_runtime_in_seconds = 0
    ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
    return ret


  def TestLayout(self):
    # A "chunk file" is maintained in the local directory so that each test
    # runs a slice of the layout tests of size chunk_size that increments with
    # each run.  Since tests can be added and removed from the layout tests at
    # any time, this is not going to give exact coverage, but it will allow us
    # to continuously run small slices of the layout tests under valgrind rather
    # than having to run all of them in one shot.
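    # For example, with the default chunk size of 500, consecutive runs cover
    # tests [0, 500), [500, 1000), and so on, wrapping around at the end of
    # the list.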
    chunk_size = self._options.num_tests
    if chunk_size == 0:
      return self.TestLayoutChunk(0, 0)
    chunk_num = 0
    chunk_file = "valgrind_layout_chunk.txt"
    logging.info("Reading state from " + chunk_file)
    try:
      f = open(chunk_file)
      contents = f.read()
      if contents:
        chunk_num = int(contents)
      # This should be enough so that we have a couple of complete runs
      # of test data stored in the archive (although note that when we loop,
      # we are almost guaranteed not to be at the end of the test list).
      if chunk_num > 10000:
        chunk_num = 0
      f.close()
    except IOError, (errno, strerror):
      logging.error("error reading from file %s (%d, %s)" % (chunk_file,
                    errno, strerror))
    # Save the new chunk number before running the tests. Otherwise if a
    # particular chunk hangs the bot, the chunk number will never get
    # incremented and the bot will be wedged.
    logging.info("Saving state to " + chunk_file)
    try:
      f = open(chunk_file, "w")
      chunk_num += 1
      f.write("%d" % chunk_num)
      f.close()
    except IOError, (errno, strerror):
      logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
                    strerror))
    # Since we're running small chunks of the layout tests, it's important to
    # mark the ones that have errors in them.  These won't be visible in the
    # summary list for long, but will be useful for someone reviewing this bot.
    return self.TestLayoutChunk(chunk_num, chunk_size)

  # The known list of tests.
  # Recognise the original abbreviations as well as full executable names.
  _test_list = {
    "cmdline" : RunCmdLine,
    "app_list": TestAppList,     "app_list_unittests": TestAppList,
    "ash": TestAsh,              "ash_unittests": TestAsh,
    "aura": TestAura,            "aura_unittests": TestAura,
    "automated_ui" : TestAutomatedUI,
    "base": TestBase,            "base_unittests": TestBase,
    "browser": TestBrowser,      "browser_tests": TestBrowser,
    "chromeos": TestChromeOS,    "chromeos_unittests": TestChromeOS,
    "components": TestComponents, "components_unittests": TestComponents,
    "compositor": TestCompositor, "compositor_unittests": TestCompositor,
    "content": TestContent,      "content_unittests": TestContent,
    "content_browsertests": TestContentBrowser,
    "courgette": TestCourgette,  "courgette_unittests": TestCourgette,
    "crypto": TestCrypto,        "crypto_unittests": TestCrypto,
    "device": TestDevice,        "device_unittests": TestDevice,
    "events": TestEvents,        "events_unittests": TestEvents,
    "ffmpeg": TestFFmpeg,        "ffmpeg_unittests": TestFFmpeg,
    "ffmpeg_regression_tests": TestFFmpegRegressions,
    "gpu": TestGPU,              "gpu_unittests": TestGPU,
    "ipc": TestIpc,              "ipc_tests": TestIpc,
    "interactive_ui": TestInteractiveUI,
    "jingle": TestJingle,        "jingle_unittests": TestJingle,
    "layout": TestLayout,        "layout_tests": TestLayout,
    "webkit": TestLayout,
    "media": TestMedia,          "media_unittests": TestMedia,
    "message_center": TestMessageCenter,
    "message_center_unittests" : TestMessageCenter,
    "net": TestNet,              "net_unittests": TestNet,
    "net_perf": TestNetPerf,     "net_perftests": TestNetPerf,
    "ppapi": TestPPAPI,          "ppapi_unittests": TestPPAPI,
    "printing": TestPrinting,    "printing_unittests": TestPrinting,
    "reliability": TestReliability, "reliability_tests": TestReliability,
    "remoting": TestRemoting,    "remoting_unittests": TestRemoting,
    "safe_browsing": TestSafeBrowsing, "safe_browsing_tests": TestSafeBrowsing,
    "sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
    "sql": TestSql,              "sql_unittests": TestSql,
    "sync": TestSync,            "sync_unit_tests": TestSync,
    "sync_integration_tests": TestSyncIntegration,
    "sync_integration": TestSyncIntegration,
    "ui_unit": TestUIUnit,       "ui_unittests": TestUIUnit,
    "unit": TestUnit,            "unit_tests": TestUnit,
    "url": TestURL,              "url_unittests": TestURL,
    "views": TestViews,          "views_unittests": TestViews,
  }


def _main():
  parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
                                 "[-t <test> ...]")

  parser.add_option("--help-tests", dest="help_tests", action="store_true",
                    default=False, help="List all available tests")
  parser.add_option("-b", "--build-dir",
                    help="the location of the compiler output")
  parser.add_option("--target", help="Debug or Release")
  parser.add_option("-t", "--test", action="append", default=[],
                    help="which test to run; supports the test:gtest_filter "
                         "format as well")
  parser.add_option("--baseline", action="store_true", default=False,
                    help="generate baseline data instead of validating")
  parser.add_option("--gtest_filter",
                    help="additional arguments to --gtest_filter")
  parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
  parser.add_option("--gtest_shuffle", action="store_true", default=False,
                    help="randomize the test order on every iteration")
  parser.add_option("-v", "--verbose", action="store_true", default=False,
                    help="verbose output - enable debug log messages")
  parser.add_option("--tool", dest="valgrind_tool", default="memcheck",
                    help="specify a valgrind tool to run the tests under")
  parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
                    help="specify custom flags for the selected valgrind tool")
  parser.add_option("--keep_logs", action="store_true", default=False,
                    help="store memory tool logs in the <tool>.logs directory "
                         "instead of /tmp.\nThis can be useful for tool "
                         "developers/maintainers.\nPlease note that the <tool>"
                         ".logs directory will be clobbered on tool startup.")
  parser.add_option("-n", "--num_tests", type="int",
                    default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
                    help="for layout tests: # of subtests per run.  0 for all.")
  # TODO(thestig) Remove this if we can.
  parser.add_option("--gtest_color", dest="gtest_color", default="no",
                    help="dummy compatibility flag for sharding_supervisor.")
  parser.add_option("--brave-new-test-launcher", action="store_true",
                    help="run the tests with --brave-new-test-launcher")
  parser.add_option("--test-launcher-bot-mode", action="store_true",
                    help="run the tests with --test-launcher-bot-mode")

  options, args = parser.parse_args()

  # Bake target into build_dir.
  if options.target and options.build_dir:
    assert (options.target !=
            os.path.basename(os.path.dirname(options.build_dir)))
    options.build_dir = os.path.join(os.path.abspath(options.build_dir),
                                     options.target)
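  # e.g. --build-dir=out --target=Debug yields a build_dir of
  # "<absolute path>/out/Debug" (illustrative values).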

  if options.verbose:
    logging_utils.config_root(logging.DEBUG)
  else:
    logging_utils.config_root()

  if options.help_tests:
    ChromeTests.ShowTests()
    return 0

  if not options.test:
    parser.error("--test not specified")

  if len(options.test) != 1 and options.gtest_filter:
    parser.error("--gtest_filter and multiple tests don't make sense together")

  for t in options.test:
    tests = ChromeTests(options, args, t)
    ret = tests.Run()
    if ret: return ret
  return 0


if __name__ == "__main__":
  sys.exit(_main())