#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs all types of tests from one unified interface."""
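
# Example invocations (drawn from the per-command usage/example strings
# defined below):
#   test_runner.py gtest -s base_unittests
#   test_runner.py instrumentation --test-apk=ChromiumTestShellTest
#   test_runner.py uiautomator --test-jar=chromium_testshell_uiautomator_tests \
#       --package=chromium_test_shell
#   test_runner.py monkey --package=chromium_test_shell
#   test_runner.py perf --steps perf_steps.json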

import collections
import logging
import optparse
import os
import shutil
import signal
import sys
import threading
import traceback

from pylib import android_commands
from pylib import constants
from pylib import forwarder
from pylib import ports
from pylib.base import base_test_result
from pylib.base import test_dispatcher
from pylib.gtest import gtest_config
from pylib.gtest import setup as gtest_setup
from pylib.gtest import test_options as gtest_test_options
from pylib.linker import setup as linker_setup
from pylib.host_driven import setup as host_driven_setup
from pylib.instrumentation import setup as instrumentation_setup
from pylib.instrumentation import test_options as instrumentation_test_options
from pylib.monkey import setup as monkey_setup
from pylib.monkey import test_options as monkey_test_options
from pylib.perf import setup as perf_setup
from pylib.perf import test_options as perf_test_options
from pylib.perf import test_runner as perf_test_runner
from pylib.uiautomator import setup as uiautomator_setup
from pylib.uiautomator import test_options as uiautomator_test_options
from pylib.utils import command_option_parser
from pylib.utils import report_results
from pylib.utils import reraiser_thread
from pylib.utils import run_tests_helper


def AddCommonOptions(option_parser):
  """Adds all common options to |option_parser|."""

  group = optparse.OptionGroup(option_parser, 'Common Options')
  default_build_type = os.environ.get('BUILDTYPE', 'Debug')
  group.add_option('--debug', action='store_const', const='Debug',
                   dest='build_type', default=default_build_type,
                   help=('If set, run test suites under out/Debug. '
                         'Default is env var BUILDTYPE or Debug.'))
  group.add_option('--release', action='store_const',
                   const='Release', dest='build_type',
                   help=('If set, run test suites under out/Release. '
                         'Default is env var BUILDTYPE or Debug.'))
  group.add_option('-c', dest='cleanup_test_files',
                   help='Cleanup test files on the device after run',
                   action='store_true')
  group.add_option('--num_retries', dest='num_retries', type='int',
                   default=2,
                   help=('Number of retries for a test before '
                         'giving up.'))
  group.add_option('-v',
                   '--verbose',
                   dest='verbose_count',
                   default=0,
                   action='count',
                   help='Verbose level (multiple times for more)')
  group.add_option('--tool',
                   dest='tool',
                   help=('Run the test under a tool '
                         '(use --tool help to list them)'))
  group.add_option('--flakiness-dashboard-server',
                   dest='flakiness_dashboard_server',
                   help=('Address of the server that is hosting the '
                         'Chrome for Android flakiness dashboard.'))
  group.add_option('--skip-deps-push', dest='push_deps',
                   action='store_false', default=True,
                   help=('Do not push dependencies to the device. '
                         'Use this at your own risk to speed up test '
                         'execution on a local machine.'))
  group.add_option('-d', '--device', dest='test_device',
                   help=('Target device for the test suite '
                         'to run on.'))
  option_parser.add_option_group(group)


def ProcessCommonOptions(options):
  """Processes and handles all common options."""
  run_tests_helper.SetLogLevel(options.verbose_count)
  constants.SetBuildType(options.build_type)


def AddGTestOptions(option_parser):
  """Adds gtest options to |option_parser|."""

  option_parser.usage = '%prog gtest [options]'
  option_parser.commands_dict = {}
  option_parser.example = '%prog gtest -s base_unittests'

  # TODO(gkanwar): Make this option required
  option_parser.add_option('-s', '--suite', dest='suite_name',
                           help=('Executable name of the test suite to run '
                                 '(use -s help to list them).'))
  option_parser.add_option('-f', '--gtest_filter', '--gtest-filter',
                           dest='test_filter',
                           help='googletest-style filter string.')
  option_parser.add_option('--gtest_also_run_disabled_tests',
                           '--gtest-also-run-disabled-tests',
                           dest='run_disabled', action='store_true',
                           help='Also run disabled tests if applicable.')
  option_parser.add_option('-a', '--test-arguments', dest='test_arguments',
                           default='',
                           help='Additional arguments to pass to the test.')
  option_parser.add_option('-t', dest='timeout',
                           help='Timeout to wait for each test',
                           type='int',
                           default=60)
  # TODO(gkanwar): Move these to Common Options once we have the plumbing
  # in our other test types to handle these commands
  AddCommonOptions(option_parser)


def AddLinkerTestOptions(option_parser):
  option_parser.usage = '%prog linker'
  option_parser.commands_dict = {}
  option_parser.example = '%prog linker'

  option_parser.add_option('-f', '--gtest-filter', dest='test_filter',
                           help='googletest-style filter string.')
  AddCommonOptions(option_parser)


def ProcessGTestOptions(options):
  """Intercept test suite help to list test suites.

  Args:
    options: Command line options.
  """
  if options.suite_name == 'help':
    print 'Available test suites are:'
    for test_suite in (gtest_config.STABLE_TEST_SUITES +
                       gtest_config.EXPERIMENTAL_TEST_SUITES):
      print test_suite
    sys.exit(0)

  # Convert to a list, assuming all test suites if nothing was specified.
  # TODO(gkanwar): Require having a test suite
  if options.suite_name:
    options.suite_name = [options.suite_name]
  else:
    options.suite_name = [s for s in gtest_config.STABLE_TEST_SUITES]


def AddJavaTestOptions(option_parser):
  """Adds the Java test options to |option_parser|."""

  option_parser.add_option('-f', '--test-filter', dest='test_filter',
                           help=('Test filter (if not fully qualified, '
                                 'will run all matches).'))
  option_parser.add_option(
      '-A', '--annotation', dest='annotation_str',
      help=('Comma-separated list of annotations. Run only tests with any of '
            'the given annotations. An annotation can be either a key or a '
            'key-values pair. A test that has no annotation is considered '
            '"SmallTest".'))
  option_parser.add_option(
      '-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            'annotations.'))
  option_parser.add_option('--screenshot', dest='screenshot_failures',
                           action='store_true',
                           help='Capture screenshots of test failures')
  option_parser.add_option('--save-perf-json', action='store_true',
                           help='Saves the JSON file for each UI Perf test.')
  option_parser.add_option('--official-build', action='store_true',
                           help='Run official build tests.')
  option_parser.add_option('--test_data', action='append', default=[],
                           help=('Each instance defines a directory of test '
                                 'data that should be copied to the target(s) '
                                 'before running the tests. The argument '
                                 'should be of the form <target>:<source>, '
                                 '<target> is relative to the device data '
                                 'directory, and <source> is relative to the '
                                 'chromium build directory.'))


def ProcessJavaTestOptions(options, error_func):
  """Processes options/arguments and populates |options| with defaults."""

  if options.annotation_str:
    options.annotations = options.annotation_str.split(',')
  elif options.test_filter:
    options.annotations = []
  else:
    options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
                           'EnormousTest']

  if options.exclude_annotation_str:
    options.exclude_annotations = options.exclude_annotation_str.split(',')
  else:
    options.exclude_annotations = []


def AddInstrumentationTestOptions(option_parser):
  """Adds Instrumentation test options to |option_parser|."""

  option_parser.usage = '%prog instrumentation [options]'
  option_parser.commands_dict = {}
  option_parser.example = ('%prog instrumentation '
                           '--test-apk=ChromiumTestShellTest')

  AddJavaTestOptions(option_parser)
  AddCommonOptions(option_parser)

  option_parser.add_option('-j', '--java-only', action='store_true',
                           default=False, help='Run only the Java tests.')
  option_parser.add_option('-p', '--python-only', action='store_true',
                           default=False,
                           help='Run only the host-driven tests.')
  option_parser.add_option('--host-driven-root',
                           help='Root of the host-driven tests.')
  option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger',
                           action='store_true',
                           help='Wait for debugger.')
  option_parser.add_option(
      '--test-apk', dest='test_apk',
      help=('The name of the apk containing the tests '
            '(without the .apk extension; e.g. "ContentShellTest").'))
  option_parser.add_option('--coverage-dir',
                           help=('Directory in which to place all generated '
                                 'EMMA coverage files.'))


def ProcessInstrumentationOptions(options, error_func):
  """Processes options/arguments and populates |options| with defaults.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    An InstrumentationOptions named tuple which contains all options relevant
    to instrumentation tests.
  """

  ProcessJavaTestOptions(options, error_func)

  if options.java_only and options.python_only:
    error_func('Options java_only (-j) and python_only (-p) '
               'are mutually exclusive.')
  options.run_java_tests = True
  options.run_python_tests = True
  if options.java_only:
    options.run_python_tests = False
  elif options.python_only:
    options.run_java_tests = False

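  # Host-driven (python) tests only run when a root directory is provided.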
  if not options.host_driven_root:
    options.run_python_tests = False

  if not options.test_apk:
    error_func('--test-apk must be specified.')
  options.test_apk_path = os.path.join(constants.GetOutDirectory(),
                                       constants.SDK_BUILD_APKS_DIR,
                                       '%s.apk' % options.test_apk)
  options.test_apk_jar_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_TEST_JAVALIB_DIR,
      '%s.jar' % options.test_apk)

  return instrumentation_test_options.InstrumentationOptions(
      options.tool,
      options.cleanup_test_files,
      options.push_deps,
      options.annotations,
      options.exclude_annotations,
      options.test_filter,
      options.test_data,
      options.save_perf_json,
      options.screenshot_failures,
      options.wait_for_debugger,
      options.coverage_dir,
      options.test_apk,
      options.test_apk_path,
      options.test_apk_jar_path)


def AddUIAutomatorTestOptions(option_parser):
  """Adds UI Automator test options to |option_parser|."""

  option_parser.usage = '%prog uiautomator [options]'
  option_parser.commands_dict = {}
  option_parser.example = (
      '%prog uiautomator --test-jar=chromium_testshell_uiautomator_tests'
      ' --package=chromium_test_shell')
  option_parser.add_option(
      '--package',
      help=('Package under test. Possible values: %s' %
            constants.PACKAGE_INFO.keys()))
  option_parser.add_option(
      '--test-jar', dest='test_jar',
      help=('The name of the dexed jar containing the tests (without the '
            '.dex.jar extension). Alternatively, this can be a full path '
            'to the jar.'))

  AddJavaTestOptions(option_parser)
  AddCommonOptions(option_parser)


def ProcessUIAutomatorOptions(options, error_func):
  """Processes UIAutomator options/arguments.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    A UIAutomatorOptions named tuple which contains all options relevant to
    uiautomator tests.
  """

  ProcessJavaTestOptions(options, error_func)

  if not options.package:
    error_func('--package is required.')

  if options.package not in constants.PACKAGE_INFO:
    error_func('Invalid package.')

  if not options.test_jar:
    error_func('--test-jar must be specified.')

  if os.path.exists(options.test_jar):
    # The dexed JAR is fully qualified; assume the info JAR lives alongside.
    options.uiautomator_jar = options.test_jar
  else:
    options.uiautomator_jar = os.path.join(
        constants.GetOutDirectory(),
        constants.SDK_BUILD_JAVALIB_DIR,
        '%s.dex.jar' % options.test_jar)
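  # The host-side info JAR is expected next to the dexed JAR, with the
  # '.dex.jar' suffix replaced by '_java.jar'.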
  options.uiautomator_info_jar = (
      options.uiautomator_jar[:options.uiautomator_jar.find('.dex.jar')] +
      '_java.jar')

  return uiautomator_test_options.UIAutomatorOptions(
      options.tool,
      options.cleanup_test_files,
      options.push_deps,
      options.annotations,
      options.exclude_annotations,
      options.test_filter,
      options.test_data,
      options.save_perf_json,
      options.screenshot_failures,
      options.uiautomator_jar,
      options.uiautomator_info_jar,
      options.package)


def AddMonkeyTestOptions(option_parser):
  """Adds monkey test options to |option_parser|."""

  option_parser.usage = '%prog monkey [options]'
  option_parser.commands_dict = {}
  option_parser.example = (
      '%prog monkey --package=chromium_test_shell')

  option_parser.add_option(
      '--package',
      help=('Package under test. Possible values: %s' %
            constants.PACKAGE_INFO.keys()))
  option_parser.add_option(
      '--event-count', default=10000, type='int',
      help='Number of events to generate [default: %default].')
  option_parser.add_option(
      '--category', default='',
      help='A list of allowed categories.')
  option_parser.add_option(
      '--throttle', default=100, type='int',
      help='Delay between events (ms) [default: %default].')
  option_parser.add_option(
      '--seed', type='int',
      help=('Seed value for pseudo-random generator. Same seed value generates '
            'the same sequence of events. Seed is randomized by default.'))
  option_parser.add_option(
      '--extra-args', default='',
      help=('String of other args to pass to the command verbatim '
            '[default: "%default"].'))

  AddCommonOptions(option_parser)


def ProcessMonkeyTestOptions(options, error_func):
  """Processes all monkey test options.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    A MonkeyOptions named tuple which contains all options relevant to
    monkey tests.
  """
  if not options.package:
    error_func('--package is required.')

  if options.package not in constants.PACKAGE_INFO:
    error_func('Invalid package.')

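  # A comma-separated --category string is split into a list before being
  # passed through to MonkeyOptions.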
  category = options.category
  if category:
    category = options.category.split(',')

  return monkey_test_options.MonkeyOptions(
      options.verbose_count,
      options.package,
      options.event_count,
      category,
      options.throttle,
      options.seed,
      options.extra_args)


def AddPerfTestOptions(option_parser):
  """Adds perf test options to |option_parser|."""

  option_parser.usage = '%prog perf [options]'
  option_parser.commands_dict = {}
  option_parser.example = ('%prog perf '
                           '[--single-step -- command args] or '
                           '[--steps perf_steps.json] or '
                           '[--print-step step]')

  option_parser.add_option(
      '--single-step',
      action='store_true',
      help='Execute the given command with retries, but only print the result '
           'for the "most successful" round.')
  option_parser.add_option(
      '--steps',
      help='JSON file containing the list of commands to run.')
  option_parser.add_option(
      '--flaky-steps',
      help=('A JSON file containing steps that are flaky '
            'and will have their exit codes ignored.'))
  option_parser.add_option(
      '--print-step',
      help='The name of a previously executed perf step to print.')
  option_parser.add_option(
      '--no-timeout', action='store_true',
      help=('Do not impose a timeout. Each perf step is responsible for '
            'implementing the timeout logic.'))
  option_parser.add_option(
      '-f', '--test-filter',
      help=('Test filter (will match against the names listed in --steps).'))
  option_parser.add_option(
      '--dry-run',
      action='store_true',
      help='Just print the steps without executing.')
  AddCommonOptions(option_parser)


def ProcessPerfTestOptions(options, args, error_func):
  """Processes all perf test options.

  Args:
    options: optparse.Options object.
    args: List of extra args from optparse.
    error_func: Function to call with the error message in case of an error.

  Returns:
    A PerfOptions named tuple which contains all options relevant to
    perf tests.
  """
  # Exactly one of steps, print_step, or single_step must be provided.
  count = len(filter(None,
                     [options.steps, options.print_step, options.single_step]))
  if count != 1:
    error_func('Please specify one of: --steps, --print-step, --single-step.')
  single_step = None
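  # In single-step mode, everything after the first two entries of |args|
  # (i.e. the command supplied after '--') is joined into one command string.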
  if options.single_step:
    single_step = ' '.join(args[2:])
  return perf_test_options.PerfOptions(
      options.steps, options.flaky_steps, options.print_step,
      options.no_timeout, options.test_filter, options.dry_run,
      single_step)


def _RunGTests(options, error_func, devices):
  """Subcommand of RunTestsCommand which runs gtests."""
  ProcessGTestOptions(options)

  exit_code = 0
  for suite_name in options.suite_name:
    # TODO(gkanwar): Move this into ProcessGTestOptions once we require -s for
    # the gtest command.
    gtest_options = gtest_test_options.GTestOptions(
        options.tool,
        options.cleanup_test_files,
        options.push_deps,
        options.test_filter,
        options.run_disabled,
        options.test_arguments,
        options.timeout,
        suite_name)
    runner_factory, tests = gtest_setup.Setup(gtest_options, devices)

    results, test_exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=options.num_retries)

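    # Escalate the exit code on failure, but never overwrite an existing
    # ERROR_EXIT_CODE from an earlier suite.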
    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
      exit_code = test_exit_code

    report_results.LogFull(
        results=results,
        test_type='Unit test',
        test_package=suite_name,
        flakiness_server=options.flakiness_dashboard_server)

  if os.path.isdir(constants.ISOLATE_DEPS_DIR):
    shutil.rmtree(constants.ISOLATE_DEPS_DIR)

  return exit_code


def _RunLinkerTests(options, error_func, devices):
  """Subcommand of RunTestsCommand which runs linker tests."""
  runner_factory, tests = linker_setup.Setup(options, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=60,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Linker test',
      test_package='ContentLinkerTest')

  return exit_code


def _RunInstrumentationTests(options, error_func, devices):
  """Subcommand of RunTestsCommand which runs instrumentation tests."""
  instrumentation_options = ProcessInstrumentationOptions(options, error_func)

  if len(devices) > 1 and options.wait_for_debugger:
    logging.warning('Debugger cannot be sharded; using first available device')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  exit_code = 0

  if options.run_java_tests:
    runner_factory, tests = instrumentation_setup.Setup(instrumentation_options)

    test_results, exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=options.num_retries)

    results.AddTestRunResults(test_results)

  if options.run_python_tests:
    runner_factory, tests = host_driven_setup.InstrumentationSetup(
        options.host_driven_root, options.official_build,
        instrumentation_options)

    if tests:
      test_results, test_exit_code = test_dispatcher.RunTests(
          tests, runner_factory, devices, shard=True, test_timeout=None,
          num_retries=options.num_retries)

      results.AddTestRunResults(test_results)

      # Only allow exit code escalation
      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
        exit_code = test_exit_code

  report_results.LogFull(
      results=results,
      test_type='Instrumentation',
      test_package=os.path.basename(options.test_apk),
      annotation=options.annotations,
      flakiness_server=options.flakiness_dashboard_server)

  return exit_code


def _RunUIAutomatorTests(options, error_func, devices):
  """Subcommand of RunTestsCommand which runs uiautomator tests."""
  uiautomator_options = ProcessUIAutomatorOptions(options, error_func)

  runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=None,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='UIAutomator',
      test_package=os.path.basename(options.test_jar),
      annotation=options.annotations,
      flakiness_server=options.flakiness_dashboard_server)

  return exit_code


def _RunMonkeyTests(options, error_func, devices):
  """Subcommand of RunTestsCommand which runs monkey tests."""
  monkey_options = ProcessMonkeyTestOptions(options, error_func)

  runner_factory, tests = monkey_setup.Setup(monkey_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey')

  return exit_code


def _RunPerfTests(options, args, error_func, devices):
  """Subcommand of RunTestsCommand which runs perf tests."""
  perf_options = ProcessPerfTestOptions(options, args, error_func)
  # Just print the results from a single previously executed step.
  if perf_options.print_step:
    return perf_test_runner.PrintTestOutput(perf_options.print_step)

  runner_factory, tests = perf_setup.Setup(perf_options)

  results, _ = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=None,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Perf',
      test_package='Perf')

  if perf_options.single_step:
    return perf_test_runner.PrintTestOutput('single_step')

  # Always return 0 on the sharding stage. Individual test exit codes
  # will be returned on the print_step stage.
  return 0


def _GetAttachedDevices(test_device=None):
  """Get all attached devices.

  Args:
    test_device: Name of a specific device to use.

  Returns:
    A list of attached devices.
  """
  attached_devices = []

  attached_devices = android_commands.GetAttachedDevices()
  if test_device:
    assert test_device in attached_devices, (
        'Did not find device %s among attached devices. Attached devices: %s'
        % (test_device, ', '.join(attached_devices)))
    attached_devices = [test_device]

  assert attached_devices, 'No devices attached.'

  return sorted(attached_devices)


def RunTestsCommand(command, options, args, option_parser):
  """Checks test type and dispatches to the appropriate function.

  Args:
    command: String indicating the command that was received to trigger
        this function.
    options: optparse options dictionary.
    args: List of extra args from optparse.
    option_parser: optparse.OptionParser object.

  Returns:
    Integer indicating exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
        individual test runner.
  """

  # Check for extra arguments
  if len(args) > 2 and command != 'perf':
    option_parser.error('Unrecognized arguments: %s' % (' '.join(args[2:])))
    return constants.ERROR_EXIT_CODE
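  # The perf command is the one case where extra positional arguments are
  # expected: with --single-step they form the command to run (after '--').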
  if command == 'perf':
    if ((options.single_step and len(args) <= 2) or
        (not options.single_step and len(args) > 2)):
      option_parser.error('Unrecognized arguments: %s' % (' '.join(args)))
      return constants.ERROR_EXIT_CODE

  ProcessCommonOptions(options)

  devices = _GetAttachedDevices(options.test_device)

  forwarder.Forwarder.RemoveHostLog()
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  if command == 'gtest':
    return _RunGTests(options, option_parser.error, devices)
  elif command == 'linker':
    return _RunLinkerTests(options, option_parser.error, devices)
  elif command == 'instrumentation':
    return _RunInstrumentationTests(options, option_parser.error, devices)
  elif command == 'uiautomator':
    return _RunUIAutomatorTests(options, option_parser.error, devices)
  elif command == 'monkey':
    return _RunMonkeyTests(options, option_parser.error, devices)
  elif command == 'perf':
    return _RunPerfTests(options, args, option_parser.error, devices)
  else:
    raise Exception('Unknown test type.')


def HelpCommand(command, options, args, option_parser):
  """Display help for a certain command, or overall help.

  Args:
    command: String indicating the command that was received to trigger
        this function.
    options: optparse options dictionary.
    args: List of extra args from optparse.
    option_parser: optparse.OptionParser object.

  Returns:
    Integer indicating exit code.
  """
  # If we don't have any args, display overall help
  if len(args) < 3:
    option_parser.print_help()
    return 0
  # If we have too many args, print an error
  if len(args) > 3:
    option_parser.error('Unrecognized arguments: %s' % (' '.join(args[3:])))
    return constants.ERROR_EXIT_CODE

  command = args[2]

  if command not in VALID_COMMANDS:
    option_parser.error('Unrecognized command.')

  # Treat the help command as a special case. We don't care about showing a
  # specific help page for itself.
  if command == 'help':
    option_parser.print_help()
    return 0

  VALID_COMMANDS[command].add_options_func(option_parser)
  option_parser.usage = '%prog ' + command + ' [options]'
  option_parser.commands_dict = {}
  option_parser.print_help()

  return 0


# Define a named tuple for the values in the VALID_COMMANDS dictionary so the
# syntax is a bit prettier. The tuple is two functions: (add options, run
# command).
CommandFunctionTuple = collections.namedtuple(
    'CommandFunctionTuple', ['add_options_func', 'run_command_func'])
VALID_COMMANDS = {
    'gtest': CommandFunctionTuple(AddGTestOptions, RunTestsCommand),
    'instrumentation': CommandFunctionTuple(
        AddInstrumentationTestOptions, RunTestsCommand),
    'uiautomator': CommandFunctionTuple(
        AddUIAutomatorTestOptions, RunTestsCommand),
    'monkey': CommandFunctionTuple(
        AddMonkeyTestOptions, RunTestsCommand),
    'perf': CommandFunctionTuple(
        AddPerfTestOptions, RunTestsCommand),
    'linker': CommandFunctionTuple(
        AddLinkerTestOptions, RunTestsCommand),
    'help': CommandFunctionTuple(lambda option_parser: None, HelpCommand)
    }


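# Installed below as the SIGUSR1 handler; dumps the stack of every live
# thread so a hung run can be diagnosed (e.g. via `kill -USR1 <pid>`).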
def DumpThreadStacks(signal, frame):
  for thread in threading.enumerate():
    reraiser_thread.LogThreadStack(thread)


def main(argv):
  signal.signal(signal.SIGUSR1, DumpThreadStacks)
  option_parser = command_option_parser.CommandOptionParser(
      commands_dict=VALID_COMMANDS)
  return command_option_parser.ParseAndExecute(option_parser)


if __name__ == '__main__':
  sys.exit(main(sys.argv))