#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs all types of tests from one unified interface."""
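# Example invocations, drawn from the per-command examples defined below:
#   test_runner.py gtest -s base_unittests
#   test_runner.py instrumentation --test-apk=ChromeShellTest
#   test_runner.py uiautomator --test-jar=chrome_shell_uiautomator_tests \
#       --package=chrome_shell
#   test_runner.py monkey --package=chrome_shell
#   test_runner.py perf --steps perf_steps.json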

import collections
import logging
import optparse
import os
import shutil
import signal
import sys
import threading

from pylib import android_commands
from pylib import constants
from pylib import forwarder
from pylib import ports
from pylib.base import base_test_result
from pylib.base import test_dispatcher
from pylib.gtest import gtest_config
from pylib.gtest import setup as gtest_setup
from pylib.gtest import test_options as gtest_test_options
from pylib.linker import setup as linker_setup
from pylib.host_driven import setup as host_driven_setup
from pylib.instrumentation import setup as instrumentation_setup
from pylib.instrumentation import test_options as instrumentation_test_options
from pylib.monkey import setup as monkey_setup
from pylib.monkey import test_options as monkey_test_options
from pylib.perf import setup as perf_setup
from pylib.perf import test_options as perf_test_options
from pylib.perf import test_runner as perf_test_runner
from pylib.uiautomator import setup as uiautomator_setup
from pylib.uiautomator import test_options as uiautomator_test_options
from pylib.utils import command_option_parser
from pylib.utils import report_results
from pylib.utils import reraiser_thread
from pylib.utils import run_tests_helper


def AddCommonOptions(option_parser):
  """Adds all common options to |option_parser|."""

  group = optparse.OptionGroup(option_parser, 'Common Options')
  default_build_type = os.environ.get('BUILDTYPE', 'Debug')
  group.add_option('--debug', action='store_const', const='Debug',
                   dest='build_type', default=default_build_type,
                   help=('If set, run test suites under out/Debug. '
                         'Default is env var BUILDTYPE or Debug.'))
  group.add_option('--release', action='store_const',
                   const='Release', dest='build_type',
                   help=('If set, run test suites under out/Release. '
                         'Default is env var BUILDTYPE or Debug.'))
  group.add_option('-c', dest='cleanup_test_files',
                   help='Cleanup test files on the device after run',
                   action='store_true')
  group.add_option('--num_retries', dest='num_retries', type='int',
                   default=2,
                   help=('Number of retries for a test before '
                         'giving up.'))
  group.add_option('-v',
                   '--verbose',
                   dest='verbose_count',
                   default=0,
                   action='count',
                   help='Verbose level (multiple times for more)')
  group.add_option('--tool',
                   dest='tool',
                   help=('Run the test under a tool '
                         '(use --tool help to list them)'))
  group.add_option('--flakiness-dashboard-server',
                   dest='flakiness_dashboard_server',
                   help=('Address of the server that is hosting the '
                         'Chrome for Android flakiness dashboard.'))
  group.add_option('--skip-deps-push', dest='push_deps',
                   action='store_false', default=True,
                   help=('Do not push dependencies to the device. '
                         'Use this at your own risk to speed up test '
                         'execution on a local machine.'))
  group.add_option('-d', '--device', dest='test_device',
                   help=('Target device for the test suite '
                         'to run on.'))
  option_parser.add_option_group(group)


def ProcessCommonOptions(options):
  """Processes and handles all common options."""
  run_tests_helper.SetLogLevel(options.verbose_count)
  constants.SetBuildType(options.build_type)


def AddGTestOptions(option_parser):
  """Adds gtest options to |option_parser|."""

  option_parser.usage = '%prog gtest [options]'
  option_parser.commands_dict = {}
  option_parser.example = '%prog gtest -s base_unittests'

  # TODO(gkanwar): Make this option required
  option_parser.add_option('-s', '--suite', dest='suite_name',
                           help=('Executable name of the test suite to run '
                                 '(use -s help to list them).'))
  option_parser.add_option('-f', '--gtest_filter', '--gtest-filter',
                           dest='test_filter',
                           help='googletest-style filter string.')
  option_parser.add_option('--gtest_also_run_disabled_tests',
                           '--gtest-also-run-disabled-tests',
                           dest='run_disabled', action='store_true',
                           help='Also run disabled tests if applicable.')
  option_parser.add_option('-a', '--test-arguments', dest='test_arguments',
                           default='',
                           help='Additional arguments to pass to the test.')
  option_parser.add_option('-t', dest='timeout',
                           help='Timeout to wait for each test',
                           type='int',
                           default=60)
  option_parser.add_option('--isolate_file_path',
                           '--isolate-file-path',
                           dest='isolate_file_path',
                           help='.isolate file path to override the default '
                                'path')
  # TODO(gkanwar): Move these to Common Options once we have the plumbing
  # in our other test types to handle these commands
  AddCommonOptions(option_parser)


def AddLinkerTestOptions(option_parser):
  option_parser.usage = '%prog linker'
  option_parser.commands_dict = {}
  option_parser.example = '%prog linker'

  option_parser.add_option('-f', '--gtest-filter', dest='test_filter',
                           help='googletest-style filter string.')
  AddCommonOptions(option_parser)


def ProcessGTestOptions(options):
  """Intercept test suite help to list test suites.

  Args:
    options: Command line options.
  """
  if options.suite_name == 'help':
    print 'Available test suites are:'
    for test_suite in (gtest_config.STABLE_TEST_SUITES +
                       gtest_config.EXPERIMENTAL_TEST_SUITES):
      print test_suite
    sys.exit(0)

  # Convert to a list, assuming all test suites if nothing was specified.
  # TODO(gkanwar): Require having a test suite
  if options.suite_name:
    options.suite_name = [options.suite_name]
  else:
    options.suite_name = [s for s in gtest_config.STABLE_TEST_SUITES]


def AddJavaTestOptions(option_parser):
  """Adds the Java test options to |option_parser|."""

  option_parser.add_option('-f', '--test-filter', dest='test_filter',
                           help=('Test filter (if not fully qualified, '
                                 'will run all matches).'))
  option_parser.add_option(
      '-A', '--annotation', dest='annotation_str',
      help=('Comma-separated list of annotations. Run only tests with any of '
            'the given annotations. An annotation can be either a key or a '
            'key-value pair. A test that has no annotation is considered '
            '"SmallTest".'))
  option_parser.add_option(
      '-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            'annotations.'))
  option_parser.add_option('--screenshot', dest='screenshot_failures',
                           action='store_true',
                           help='Capture screenshots of test failures')
  option_parser.add_option('--save-perf-json', action='store_true',
                           help='Saves the JSON file for each UI Perf test.')
  option_parser.add_option('--official-build', action='store_true',
                           help='Run official build tests.')
  option_parser.add_option('--test_data', action='append', default=[],
                           help=('Each instance defines a directory of test '
                                 'data that should be copied to the target(s) '
                                 'before running the tests. The argument '
                                 'should be of the form <target>:<source>, '
                                 '<target> is relative to the device data '
                                 'directory, and <source> is relative to the '
                                 'chromium build directory.'))


def ProcessJavaTestOptions(options):
  """Processes options/arguments and populates |options| with defaults."""

  if options.annotation_str:
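    # A hypothetical example: '-A Smoke,MediumTest' yields
    # options.annotations = ['Smoke', 'MediumTest'].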
    options.annotations = options.annotation_str.split(',')
  elif options.test_filter:
    options.annotations = []
  else:
    options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
                           'EnormousTest']

  if options.exclude_annotation_str:
    options.exclude_annotations = options.exclude_annotation_str.split(',')
  else:
    options.exclude_annotations = []


def AddInstrumentationTestOptions(option_parser):
  """Adds Instrumentation test options to |option_parser|."""

  option_parser.usage = '%prog instrumentation [options]'
  option_parser.commands_dict = {}
  option_parser.example = ('%prog instrumentation '
                           '--test-apk=ChromeShellTest')

  AddJavaTestOptions(option_parser)
  AddCommonOptions(option_parser)

  option_parser.add_option('-j', '--java-only', action='store_true',
                           default=False, help='Run only the Java tests.')
  option_parser.add_option('-p', '--python-only', action='store_true',
                           default=False,
                           help='Run only the host-driven tests.')
  option_parser.add_option('--host-driven-root',
                           help='Root of the host-driven tests.')
  option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger',
                           action='store_true',
                           help='Wait for debugger.')
  option_parser.add_option(
      '--test-apk', dest='test_apk',
      help=('The name of the apk containing the tests '
            '(without the .apk extension; e.g. "ContentShellTest").'))
  option_parser.add_option('--coverage-dir',
                           help=('Directory in which to place all generated '
                                 'EMMA coverage files.'))


def ProcessInstrumentationOptions(options, error_func):
  """Processes options/arguments and populates |options| with defaults.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    An InstrumentationOptions named tuple which contains all options relevant to
    instrumentation tests.
  """

  ProcessJavaTestOptions(options)

  if options.java_only and options.python_only:
    error_func('Options java_only (-j) and python_only (-p) '
               'are mutually exclusive.')
  options.run_java_tests = True
  options.run_python_tests = True
  if options.java_only:
    options.run_python_tests = False
  elif options.python_only:
    options.run_java_tests = False

  if not options.host_driven_root:
    options.run_python_tests = False

  if not options.test_apk:
    error_func('--test-apk must be specified.')

  options.test_apk_path = os.path.join(constants.GetOutDirectory(),
                                       constants.SDK_BUILD_APKS_DIR,
                                       '%s.apk' % options.test_apk)
  options.test_apk_jar_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_TEST_JAVALIB_DIR,
      '%s.jar' % options.test_apk)

  return instrumentation_test_options.InstrumentationOptions(
      options.tool,
      options.cleanup_test_files,
      options.push_deps,
      options.annotations,
      options.exclude_annotations,
      options.test_filter,
      options.test_data,
      options.save_perf_json,
      options.screenshot_failures,
      options.wait_for_debugger,
      options.coverage_dir,
      options.test_apk,
      options.test_apk_path,
      options.test_apk_jar_path)


def AddUIAutomatorTestOptions(option_parser):
  """Adds UI Automator test options to |option_parser|."""

  option_parser.usage = '%prog uiautomator [options]'
  option_parser.commands_dict = {}
  option_parser.example = (
      '%prog uiautomator --test-jar=chrome_shell_uiautomator_tests'
      ' --package=chrome_shell')
  option_parser.add_option(
      '--package',
      help=('Package under test. Possible values: %s' %
            constants.PACKAGE_INFO.keys()))
  option_parser.add_option(
      '--test-jar', dest='test_jar',
      help=('The name of the dexed jar containing the tests (without the '
            '.dex.jar extension). Alternatively, this can be a full path '
            'to the jar.'))

  AddJavaTestOptions(option_parser)
  AddCommonOptions(option_parser)


def ProcessUIAutomatorOptions(options, error_func):
  """Processes UIAutomator options/arguments.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    A UIAutomatorOptions named tuple which contains all options relevant to
    uiautomator tests.
  """

  ProcessJavaTestOptions(options)

  if not options.package:
    error_func('--package is required.')

  if options.package not in constants.PACKAGE_INFO:
    error_func('Invalid package.')

  if not options.test_jar:
    error_func('--test-jar must be specified.')

  if os.path.exists(options.test_jar):
    # The dexed JAR is fully qualified; assume the info JAR lives alongside.
    options.uiautomator_jar = options.test_jar
  else:
    options.uiautomator_jar = os.path.join(
        constants.GetOutDirectory(),
        constants.SDK_BUILD_JAVALIB_DIR,
        '%s.dex.jar' % options.test_jar)
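  # Derive the info JAR name from the dexed JAR name: e.g. a hypothetical
  # 'foo.dex.jar' maps to 'foo_java.jar'.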
  options.uiautomator_info_jar = (
      options.uiautomator_jar[:options.uiautomator_jar.find('.dex.jar')] +
      '_java.jar')

  return uiautomator_test_options.UIAutomatorOptions(
      options.tool,
      options.cleanup_test_files,
      options.push_deps,
      options.annotations,
      options.exclude_annotations,
      options.test_filter,
      options.test_data,
      options.save_perf_json,
      options.screenshot_failures,
      options.uiautomator_jar,
      options.uiautomator_info_jar,
      options.package)


def AddMonkeyTestOptions(option_parser):
  """Adds monkey test options to |option_parser|."""

  option_parser.usage = '%prog monkey [options]'
  option_parser.commands_dict = {}
  option_parser.example = (
      '%prog monkey --package=chrome_shell')

  option_parser.add_option(
      '--package',
      help=('Package under test. Possible values: %s' %
            constants.PACKAGE_INFO.keys()))
  option_parser.add_option(
      '--event-count', default=10000, type='int',
      help='Number of events to generate [default: %default].')
  option_parser.add_option(
      '--category', default='',
      help='A comma-separated list of allowed categories.')
  option_parser.add_option(
      '--throttle', default=100, type='int',
      help='Delay between events (ms) [default: %default].')
  option_parser.add_option(
      '--seed', type='int',
      help=('Seed value for pseudo-random generator. Same seed value generates '
            'the same sequence of events. Seed is randomized by default.'))
  option_parser.add_option(
      '--extra-args', default='',
      help=('String of other args to pass to the command verbatim '
            '[default: "%default"].'))

  AddCommonOptions(option_parser)


def ProcessMonkeyTestOptions(options, error_func):
  """Processes all monkey test options.

  Args:
    options: optparse.Options object.
    error_func: Function to call with the error message in case of an error.

  Returns:
    A MonkeyOptions named tuple which contains all options relevant to
    monkey tests.
  """
  if not options.package:
    error_func('--package is required.')

  if options.package not in constants.PACKAGE_INFO:
    error_func('Invalid package.')

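  # Split a non-empty --category value into a list; a hypothetical
  # '--category=chrome,launcher' yields ['chrome', 'launcher'].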
  category = options.category
  if category:
    category = options.category.split(',')

  return monkey_test_options.MonkeyOptions(
      options.verbose_count,
      options.package,
      options.event_count,
      category,
      options.throttle,
      options.seed,
      options.extra_args)


def AddPerfTestOptions(option_parser):
  """Adds perf test options to |option_parser|."""

  option_parser.usage = '%prog perf [options]'
  option_parser.commands_dict = {}
  option_parser.example = ('%prog perf '
                           '[--single-step -- command args] or '
                           '[--steps perf_steps.json] or '
                           '[--print-step step]')

  option_parser.add_option(
      '--single-step',
      action='store_true',
      help='Execute the given command with retries, but only print the result '
           'for the "most successful" round.')
  option_parser.add_option(
      '--steps',
      help='JSON file containing the list of commands to run.')
  option_parser.add_option(
      '--flaky-steps',
      help=('A JSON file containing steps that are flaky '
            'and will have their exit codes ignored.'))
  option_parser.add_option(
      '--output-json-list',
      help='Write a simple list of names from --steps into the given file.')
  option_parser.add_option(
      '--print-step',
      help='The name of a previously executed perf step to print.')
  option_parser.add_option(
      '--no-timeout', action='store_true',
      help=('Do not impose a timeout. Each perf step is responsible for '
            'implementing the timeout logic.'))
  option_parser.add_option(
      '-f', '--test-filter',
      help=('Test filter (will match against the names listed in --steps).'))
  option_parser.add_option(
      '--dry-run',
      action='store_true',
      help='Just print the steps without executing.')
  AddCommonOptions(option_parser)


def ProcessPerfTestOptions(options, args, error_func):
  """Processes all perf test options.

  Args:
    options: optparse.Options object.
    args: List of extra args from optparse.
    error_func: Function to call with the error message in case of an error.

  Returns:
    A PerfOptions named tuple which contains all options relevant to
    perf tests.
  """
  # Exactly one of --steps, --print-step and --single-step must be provided.
  count = len(filter(None,
                     [options.steps, options.print_step, options.single_step]))
  if count != 1:
    error_func('Please specify one of: --steps, --print-step, --single-step.')
  single_step = None
  if options.single_step:
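    # args[:2] hold the program name and the 'perf' command (assuming the
    # dispatcher passes argv through), so '%prog perf --single-step -- foo
    # bar' yields single_step = 'foo bar'.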
    single_step = ' '.join(args[2:])
  return perf_test_options.PerfOptions(
      options.steps, options.flaky_steps, options.output_json_list,
      options.print_step, options.no_timeout, options.test_filter,
      options.dry_run, single_step)


def _RunGTests(options, devices):
  """Subcommand of RunTestsCommand which runs gtests."""
  ProcessGTestOptions(options)

  exit_code = 0
  for suite_name in options.suite_name:
    # TODO(gkanwar): Move this into ProcessGTestOptions once we require -s for
    # the gtest command.
    gtest_options = gtest_test_options.GTestOptions(
        options.tool,
        options.cleanup_test_files,
        options.push_deps,
        options.test_filter,
        options.run_disabled,
        options.test_arguments,
        options.timeout,
        options.isolate_file_path,
        suite_name)
    runner_factory, tests = gtest_setup.Setup(gtest_options, devices)

    results, test_exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=options.num_retries)

    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
      exit_code = test_exit_code

    report_results.LogFull(
        results=results,
        test_type='Unit test',
        test_package=suite_name,
        flakiness_server=options.flakiness_dashboard_server)

  if os.path.isdir(constants.ISOLATE_DEPS_DIR):
    shutil.rmtree(constants.ISOLATE_DEPS_DIR)

  return exit_code


def _RunLinkerTests(options, devices):
  """Subcommand of RunTestsCommand which runs linker tests."""
  runner_factory, tests = linker_setup.Setup(options, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=60,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Linker test',
      test_package='ChromiumLinkerTest')

  return exit_code


def _RunInstrumentationTests(options, error_func, devices):
  """Subcommand of RunTestsCommand which runs instrumentation tests."""
  instrumentation_options = ProcessInstrumentationOptions(options, error_func)

  if len(devices) > 1 and options.wait_for_debugger:
    logging.warning('Debugger cannot be sharded; using first available device')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  exit_code = 0

  if options.run_java_tests:
    runner_factory, tests = instrumentation_setup.Setup(instrumentation_options)

    test_results, exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=options.num_retries)

    results.AddTestRunResults(test_results)

  if options.run_python_tests:
    runner_factory, tests = host_driven_setup.InstrumentationSetup(
        options.host_driven_root, options.official_build,
        instrumentation_options)

    if tests:
      test_results, test_exit_code = test_dispatcher.RunTests(
          tests, runner_factory, devices, shard=True, test_timeout=None,
          num_retries=options.num_retries)

      results.AddTestRunResults(test_results)

      # Only allow exit code escalation
      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
        exit_code = test_exit_code

  report_results.LogFull(
      results=results,
      test_type='Instrumentation',
      test_package=os.path.basename(options.test_apk),
      annotation=options.annotations,
      flakiness_server=options.flakiness_dashboard_server)

  return exit_code


def _RunUIAutomatorTests(options, error_func, devices):
  """Subcommand of RunTestsCommand which runs uiautomator tests."""
  uiautomator_options = ProcessUIAutomatorOptions(options, error_func)

  runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=None,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='UIAutomator',
      test_package=os.path.basename(options.test_jar),
      annotation=options.annotations,
      flakiness_server=options.flakiness_dashboard_server)

  return exit_code


def _RunMonkeyTests(options, error_func, devices):
  """Subcommand of RunTestsCommand which runs monkey tests."""
  monkey_options = ProcessMonkeyTestOptions(options, error_func)

  runner_factory, tests = monkey_setup.Setup(monkey_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey')

  return exit_code


def _RunPerfTests(options, args, error_func):
  """Subcommand of RunTestsCommand which runs perf tests."""
  perf_options = ProcessPerfTestOptions(options, args, error_func)

  # Just save a simple json with a list of test names.
  if perf_options.output_json_list:
    return perf_test_runner.OutputJsonList(
        perf_options.steps, perf_options.output_json_list)

  # Just print the results from a single previously executed step.
  if perf_options.print_step:
    return perf_test_runner.PrintTestOutput(perf_options.print_step)

  runner_factory, tests, devices = perf_setup.Setup(perf_options)

  # shard=False means that each device gets the full list of tests and then
  # decides its own affinity.
  # shard=True means each device pops the next available test from a queue,
  # which increases throughput but provides no affinity.
  results, _ = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Perf',
      test_package='Perf')

  if perf_options.single_step:
    return perf_test_runner.PrintTestOutput('single_step')

  perf_test_runner.PrintSummary(tests)

  # Always return 0 on the sharding stage. Individual test exit codes
  # are returned on the print_step stage.
  return 0


def _GetAttachedDevices(test_device=None):
  """Get all attached devices.

  Args:
    test_device: Name of a specific device to use.

  Returns:
    A list of attached devices.
  """
  attached_devices = android_commands.GetAttachedDevices()
  if test_device:
    assert test_device in attached_devices, (
        'Did not find device %s among attached devices. Attached devices: %s'
        % (test_device, ', '.join(attached_devices)))
    attached_devices = [test_device]

  assert attached_devices, 'No devices attached.'

  return sorted(attached_devices)


def RunTestsCommand(command, options, args, option_parser):
  """Checks test type and dispatches to the appropriate function.

  Args:
    command: String indicating the command that was received to trigger
        this function.
    options: optparse options dictionary.
    args: List of extra args from optparse.
    option_parser: optparse.OptionParser object.

  Returns:
    Integer indicating the exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
        individual test runner.
  """

  # Check for extra arguments
  if len(args) > 2 and command != 'perf':
    option_parser.error('Unrecognized arguments: %s' % (' '.join(args[2:])))
    return constants.ERROR_EXIT_CODE
  if command == 'perf':
    if ((options.single_step and len(args) <= 2) or
        (not options.single_step and len(args) > 2)):
      option_parser.error('Unrecognized arguments: %s' % (' '.join(args)))
      return constants.ERROR_EXIT_CODE

  ProcessCommonOptions(options)

  devices = _GetAttachedDevices(options.test_device)

  forwarder.Forwarder.RemoveHostLog()
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  if command == 'gtest':
    return _RunGTests(options, devices)
  elif command == 'linker':
    return _RunLinkerTests(options, devices)
  elif command == 'instrumentation':
    return _RunInstrumentationTests(options, option_parser.error, devices)
  elif command == 'uiautomator':
    return _RunUIAutomatorTests(options, option_parser.error, devices)
  elif command == 'monkey':
    return _RunMonkeyTests(options, option_parser.error, devices)
  elif command == 'perf':
    return _RunPerfTests(options, args, option_parser.error)
  else:
    raise Exception('Unknown test type.')


def HelpCommand(command, _options, args, option_parser):
  """Display help for a certain command, or overall help.

  Args:
    command: String indicating the command that was received to trigger
        this function.
    _options: optparse options dictionary (unused).
    args: List of extra args from optparse.
    option_parser: optparse.OptionParser object.

  Returns:
    Integer indicating the exit code.
  """
  # If we don't have any args, display overall help
  if len(args) < 3:
    option_parser.print_help()
    return 0
  # If we have too many args, print an error
  if len(args) > 3:
    option_parser.error('Unrecognized arguments: %s' % (' '.join(args[3:])))
    return constants.ERROR_EXIT_CODE

  command = args[2]

  if command not in VALID_COMMANDS:
    option_parser.error('Unrecognized command.')

  # Treat the help command as a special case. We don't care about showing a
  # specific help page for itself.
  if command == 'help':
    option_parser.print_help()
    return 0

  VALID_COMMANDS[command].add_options_func(option_parser)
  option_parser.usage = '%prog ' + command + ' [options]'
  option_parser.commands_dict = {}
  option_parser.print_help()

  return 0


# Define a named tuple for the values in the VALID_COMMANDS dictionary so the
# syntax is a bit prettier. The tuple is two functions: (add options, run
# command).
CommandFunctionTuple = collections.namedtuple(
    'CommandFunctionTuple', ['add_options_func', 'run_command_func'])
VALID_COMMANDS = {
    'gtest': CommandFunctionTuple(AddGTestOptions, RunTestsCommand),
    'instrumentation': CommandFunctionTuple(
        AddInstrumentationTestOptions, RunTestsCommand),
    'uiautomator': CommandFunctionTuple(
        AddUIAutomatorTestOptions, RunTestsCommand),
    'monkey': CommandFunctionTuple(
        AddMonkeyTestOptions, RunTestsCommand),
    'perf': CommandFunctionTuple(
        AddPerfTestOptions, RunTestsCommand),
    'linker': CommandFunctionTuple(
        AddLinkerTestOptions, RunTestsCommand),
    'help': CommandFunctionTuple(lambda option_parser: None, HelpCommand)
    }


def DumpThreadStacks(_signal, _frame):
  for thread in threading.enumerate():
    reraiser_thread.LogThreadStack(thread)


def main():
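  # Dump the stacks of all live threads on SIGUSR1 (e.g. 'kill -USR1 <pid>')
  # to help debug a hung run.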
  signal.signal(signal.SIGUSR1, DumpThreadStacks)
  option_parser = command_option_parser.CommandOptionParser(
      commands_dict=VALID_COMMANDS)
  return command_option_parser.ParseAndExecute(option_parser)


if __name__ == '__main__':
  sys.exit(main())