#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs all types of tests from one unified interface."""

import argparse
import collections
import itertools
import logging
import os
import signal
import sys
import threading
import unittest

import devil_chromium
from devil import base_error
from devil import devil_env
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_utils
from devil.android import forwarder
from devil.android import ports
from devil.utils import reraiser_thread
from devil.utils import run_tests_helper

from pylib import constants
from pylib.constants import host_paths
from pylib.base import base_test_result
from pylib.base import environment_factory
from pylib.base import test_dispatcher
from pylib.base import test_instance_factory
from pylib.base import test_run_factory
from pylib.linker import setup as linker_setup
from pylib.junit import setup as junit_setup
from pylib.junit import test_dispatcher as junit_dispatcher
from pylib.monkey import setup as monkey_setup
from pylib.monkey import test_options as monkey_test_options
from pylib.perf import setup as perf_setup
from pylib.perf import test_options as perf_test_options
from pylib.perf import test_runner as perf_test_runner
from pylib.results import json_results
from pylib.results import report_results


_DEVIL_STATIC_CONFIG_FILE = os.path.abspath(os.path.join(
    host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'devil_config.json'))


def AddCommonOptions(parser):
  """Adds all common options to |parser|."""

  group = parser.add_argument_group('Common Options')

  default_build_type = os.environ.get('BUILDTYPE', 'Debug')

  debug_or_release_group = group.add_mutually_exclusive_group()
  debug_or_release_group.add_argument(
      '--debug', action='store_const', const='Debug', dest='build_type',
      default=default_build_type,
      help=('If set, run test suites under out/Debug. '
            'Default is env var BUILDTYPE or Debug.'))
  debug_or_release_group.add_argument(
      '--release', action='store_const', const='Release', dest='build_type',
      help=('If set, run test suites under out/Release. '
            'Default is env var BUILDTYPE or Debug.'))

  group.add_argument('--build-directory', dest='build_directory',
                     help=('Path to the directory in which build files are'
                           ' located (should not include build type)'))
  group.add_argument('--output-directory', dest='output_directory',
                     help=('Path to the directory in which build files are'
                           ' located (must include build type). This will take'
                           ' precedence over --debug, --release and'
                           ' --build-directory'))
  group.add_argument('--num_retries', '--num-retries', dest='num_retries',
                     type=int, default=2,
                     help=('Number of retries for a test before '
                           'giving up (default: %(default)s).'))
  group.add_argument('-v',
                     '--verbose',
                     dest='verbose_count',
                     default=0,
                     action='count',
                     help='Verbose level (multiple times for more)')
  group.add_argument('--flakiness-dashboard-server',
                     dest='flakiness_dashboard_server',
                     help=('Address of the server that is hosting the '
                           'Chrome for Android flakiness dashboard.'))
  group.add_argument('--enable-platform-mode', action='store_true',
                     help=('Run the test scripts in platform mode, which '
                           'conceptually separates the test runner from the '
                           '"device" (local or remote, real or emulated) on '
                           'which the tests are running. [experimental]'))
  group.add_argument('-e', '--environment', default='local',
                     choices=constants.VALID_ENVIRONMENTS,
                     help='Test environment to run in (default: %(default)s).')
  group.add_argument('--adb-path',
                     help=('Specify the absolute path of the adb binary that '
                           'should be used.'))
  group.add_argument('--json-results-file', '--test-launcher-summary-output',
                     dest='json_results_file',
                     help='If set, will dump results in JSON form '
                          'to specified file.')

  logcat_output_group = group.add_mutually_exclusive_group()
  logcat_output_group.add_argument(
      '--logcat-output-dir',
      help='If set, will dump logcats recorded during test run to directory. '
           'File names will be the device ids with timestamps.')
  logcat_output_group.add_argument(
      '--logcat-output-file',
      help='If set, will merge logcats recorded during test run and dump them '
           'to the specified file.')

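  # Argparse action that expands a single flag into several developer-friendly
  # settings; see the --fast-local-dev help text below.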
  class FastLocalDevAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
      namespace.verbose_count = max(namespace.verbose_count, 1)
      namespace.num_retries = 0
      namespace.enable_device_cache = True
      namespace.enable_concurrent_adb = True
      namespace.skip_clear_data = True
      namespace.extract_test_list_from_filter = True

  group.add_argument('--fast-local-dev', type=bool, nargs=0,
                     action=FastLocalDevAction,
                     help='Alias for: --verbose --num-retries=0 '
                          '--enable-device-cache --enable-concurrent-adb '
                          '--skip-clear-data --extract-test-list-from-filter')


def ProcessCommonOptions(args):
  """Processes and handles all common options."""
  run_tests_helper.SetLogLevel(args.verbose_count)
  constants.SetBuildType(args.build_type)
  if args.build_directory:
    constants.SetBuildDirectory(args.build_directory)
  if args.output_directory:
    constants.SetOutputDirectory(args.output_directory)

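  # If the user supplied an adb binary, point devil at it via custom deps.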
  devil_custom_deps = None
  if args.adb_path:
    devil_custom_deps = {
      'adb': {
        devil_env.GetPlatform(): [args.adb_path]
      }
    }

  devil_chromium.Initialize(
      output_directory=constants.GetOutDirectory(),
      custom_deps=devil_custom_deps)

  # Some things such as Forwarder require ADB to be in the environment path.
  adb_dir = os.path.dirname(constants.GetAdbPath())
  if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
    os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH']


def AddRemoteDeviceOptions(parser):
  group = parser.add_argument_group('Remote Device Options')

  group.add_argument('--trigger',
                     help=('Only triggers the test if set. Stores test_run_id '
                           'in the given file path.'))
  group.add_argument('--collect',
                     help=('Only collects the test results if set. '
                           'Gets test_run_id from the given file path.'))
  group.add_argument('--remote-device', action='append',
                     help='Device type to run the test on.')
  group.add_argument('--results-path',
                     help='File path to download results to.')
  group.add_argument('--api-protocol',
                     help='HTTP protocol to use (http or https).')
  group.add_argument('--api-address',
                     help='Address to send HTTP requests to.')
  group.add_argument('--api-port',
                     help='Port to send HTTP requests to.')
  group.add_argument('--runner-type',
                     help='Type of test to run as.')
  group.add_argument('--runner-package',
                     help='Package name of the test.')
  group.add_argument('--device-type',
                     choices=constants.VALID_DEVICE_TYPES,
                     help='Type of device to run on (iOS or Android).')
  group.add_argument('--device-oem', action='append',
                     help='Device OEM to run on.')
  group.add_argument('--remote-device-file',
                     help=('File with JSON to select the remote device. '
                           'Overrides all other flags.'))
  group.add_argument('--remote-device-timeout', type=int,
                     help=('Number of times to retry finding the remote '
                           'device.'))
  group.add_argument('--network-config', type=int,
                     help='Integer that specifies the network environment '
                          'that the tests will be run in.')
  group.add_argument('--test-timeout', type=int,
                     help='Test run timeout in seconds.')

  device_os_group = group.add_mutually_exclusive_group()
  device_os_group.add_argument('--remote-device-minimum-os',
                               help='Minimum OS on device.')
  device_os_group.add_argument('--remote-device-os', action='append',
                               help='OS to have on the device.')

  api_secret_group = group.add_mutually_exclusive_group()
  api_secret_group.add_argument('--api-secret', default='',
                                help='API secret for remote devices.')
  api_secret_group.add_argument('--api-secret-file', default='',
                                help='Path to file that contains API secret.')

  api_key_group = group.add_mutually_exclusive_group()
  api_key_group.add_argument('--api-key', default='',
                             help='API key for remote devices.')
  api_key_group.add_argument('--api-key-file', default='',
                             help='Path to file that contains API key.')


def AddDeviceOptions(parser):
  """Adds device options to |parser|."""
  group = parser.add_argument_group(title='Device Options')
  group.add_argument('--tool',
                     dest='tool',
                     help=('Run the test under a tool '
                           '(use --tool help to list them)'))
  group.add_argument('-d', '--device', dest='test_device',
                     help=('Target device for the test suite '
                           'to run on.'))
  group.add_argument('--blacklist-file', help='Device blacklist file.')
  group.add_argument('--enable-device-cache', action='store_true',
                     help='Cache device state to disk between runs')
  group.add_argument('--enable-concurrent-adb', action='store_true',
                     help='Run multiple adb commands at the same time, even '
                          'for the same device.')
  group.add_argument('--skip-clear-data', action='store_true',
                     help='Do not wipe app data between tests. Use this to '
                          'speed up local development; never use it on bots '
                          '(it increases flakiness).')


def AddGTestOptions(parser):
  """Adds gtest options to |parser|."""

  group = parser.add_argument_group('GTest Options')
  group.add_argument('-s', '--suite', dest='suite_name',
                     nargs='+', metavar='SUITE_NAME', required=True,
                     help='Executable name of the test suite to run.')
  group.add_argument('--executable-dist-dir',
                     help="Path to executable's dist directory for native"
                          " (non-apk) tests.")
  group.add_argument('--test-apk-incremental-install-script',
                     help='Path to install script for the test apk.')
  group.add_argument('--gtest_also_run_disabled_tests',
                     '--gtest-also-run-disabled-tests',
                     dest='run_disabled', action='store_true',
                     help='Also run disabled tests if applicable.')
  group.add_argument('-a', '--test-arguments', dest='test_arguments',
                     default='',
                     help='Additional arguments to pass to the test.')
  group.add_argument('-t', '--shard-timeout',
                     dest='shard_timeout', type=int, default=120,
                     help='Timeout to wait for each test '
                          '(default: %(default)s).')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default '
                          'path')
  group.add_argument('--app-data-file', action='append', dest='app_data_files',
                     help='A file path relative to the app data directory '
                          'that should be saved to the host.')
  group.add_argument('--app-data-file-dir',
                     help='Host directory to which app data files will be'
                          ' saved. Used with --app-data-file.')
  group.add_argument('--delete-stale-data', dest='delete_stale_data',
                     action='store_true',
                     help='Delete stale test data on the device.')
  group.add_argument('--repeat', '--gtest_repeat', '--gtest-repeat',
                     dest='repeat', type=int, default=0,
                     help='Number of times to repeat the specified set of '
                          'tests.')
  group.add_argument('--break-on-failure', '--break_on_failure',
                     dest='break_on_failure', action='store_true',
                     help='Whether to break on failure.')
  group.add_argument('--extract-test-list-from-filter',
                     action='store_true',
                     help='When a test filter is specified, and the list of '
                          'tests can be determined from it, skip querying the '
                          'device for the list of all tests. Speeds up local '
                          'development, but is not safe to use on bots '
                          '(http://crbug.com/549214).')

  filter_group = group.add_mutually_exclusive_group()
  filter_group.add_argument('-f', '--gtest_filter', '--gtest-filter',
                            dest='test_filter',
                            help='googletest-style filter string.')
  filter_group.add_argument('--gtest-filter-file', dest='test_filter_file',
                            help='Path to file that contains googletest-style '
                                 'filter strings. (Lines will be joined with '
                                 '":" to create a single filter string.)')

  AddDeviceOptions(parser)
  AddCommonOptions(parser)
  AddRemoteDeviceOptions(parser)


def AddLinkerTestOptions(parser):
  group = parser.add_argument_group('Linker Test Options')
  group.add_argument('-f', '--gtest-filter', dest='test_filter',
                     help='googletest-style filter string.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def AddJavaTestOptions(argument_group):
  """Adds the Java test options to |argument_group|."""

  argument_group.add_argument(
      '-f', '--test-filter', '--gtest_filter', '--gtest-filter',
      dest='test_filter',
      help=('Test filter (if not fully qualified, will run all matches).'))
  argument_group.add_argument(
      '--repeat', dest='repeat', type=int, default=0,
      help='Number of times to repeat the specified set of tests.')
  argument_group.add_argument(
      '--break-on-failure', '--break_on_failure',
      dest='break_on_failure', action='store_true',
      help='Whether to break on failure.')
  argument_group.add_argument(
      '-A', '--annotation', dest='annotation_str',
      help=('Comma-separated list of annotations. Run only tests with any of '
            'the given annotations. An annotation can be either a key or a '
            'key-value pair. A test that has no annotation is considered '
            '"SmallTest".'))
  argument_group.add_argument(
      '-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            'annotations.'))
  argument_group.add_argument(
      '--screenshot', dest='screenshot_failures', action='store_true',
      help='Capture screenshots of test failures.')
  argument_group.add_argument(
      '--save-perf-json', action='store_true',
      help='Saves the JSON file for each UI Perf test.')
  argument_group.add_argument(
      '--official-build', action='store_true', help='Run official build tests.')
  argument_group.add_argument(
      '--test_data', '--test-data', action='append', default=[],
      help=('Each instance defines a directory of test data that should be '
            'copied to the target(s) before running the tests. The argument '
            'should be of the form <target>:<source>, where <target> is '
            'relative to the device data directory and <source> is relative '
            'to the chromium build directory.'))
  argument_group.add_argument(
      '--disable-dalvik-asserts', dest='set_asserts', action='store_false',
      default=True, help='Removes the dalvik.vm.enableassertions property')


def ProcessJavaTestOptions(args):
  """Processes Java test options, populating |args| with defaults."""

  # TODO(jbudorick): Handle most of this function in argparse.
  if args.annotation_str:
    args.annotations = args.annotation_str.split(',')
  elif args.test_filter:
    args.annotations = []
  else:
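    # With neither a filter nor an explicit annotation list, default to the
    # standard test-size annotations.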
    args.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
                        'EnormousTest', 'IntegrationTest']

  if args.exclude_annotation_str:
    args.exclude_annotations = args.exclude_annotation_str.split(',')
  else:
    args.exclude_annotations = []


def AddInstrumentationTestOptions(parser):
  """Adds Instrumentation test options to |parser|."""

  parser.usage = '%(prog)s [options]'

  group = parser.add_argument_group('Instrumentation Test Options')
  AddJavaTestOptions(group)

  java_or_python_group = group.add_mutually_exclusive_group()
  java_or_python_group.add_argument(
      '-j', '--java-only', action='store_false',
      dest='run_python_tests', default=True, help='Run only the Java tests.')
  java_or_python_group.add_argument(
      '-p', '--python-only', action='store_false',
      dest='run_java_tests', default=True,
      help='DEPRECATED')

  group.add_argument('--host-driven-root',
                     help='DEPRECATED')
  group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger',
                     action='store_true',
                     help='Wait for debugger.')
  group.add_argument('--apk-under-test',
                     help='Path or name of the apk under test.')
  group.add_argument('--apk-under-test-incremental-install-script',
                     help='Path to install script for the --apk-under-test.')
  group.add_argument('--test-apk', required=True,
                     help='Path or name of the apk containing the tests '
                          '(name is without the .apk extension; '
                          'e.g. "ContentShellTest").')
  group.add_argument('--test-apk-incremental-install-script',
                     help='Path to install script for the --test-apk.')
  group.add_argument('--additional-apk', action='append',
                     dest='additional_apks', default=[],
                     help='Additional apk that must be installed on '
                          'the device when the tests are run.')
  group.add_argument('--coverage-dir',
                     help=('Directory in which to place all generated '
                           'EMMA coverage files.'))
  group.add_argument('--device-flags', dest='device_flags', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device')
  group.add_argument('--device-flags-file', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default '
                          'path')
  group.add_argument('--delete-stale-data', dest='delete_stale_data',
                     action='store_true',
                     help='Delete stale test data on the device.')
  group.add_argument('--timeout-scale', type=float,
                     help='Factor by which timeouts should be scaled.')
  group.add_argument('--strict-mode', dest='strict_mode', default='testing',
                     help='StrictMode command-line flag set on the device: '
                          'death/testing to kill the process, off to stop '
                          'checking, flash to flash only. Default: testing.')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)


def AddJUnitTestOptions(parser):
  """Adds junit test options to |parser|."""

  group = parser.add_argument_group('JUnit Test Options')
  group.add_argument(
      '-s', '--test-suite', dest='test_suite', required=True,
      help=('JUnit test suite to run.'))
  group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help='Filters tests googletest-style.')
  group.add_argument(
      '--package-filter', dest='package_filter',
      help='Filters tests by package.')
  group.add_argument(
      '--runner-filter', dest='runner_filter',
      help='Filters tests by runner class. Must be fully qualified.')
  group.add_argument(
      '--sdk-version', dest='sdk_version', type=int,
      help='The Android SDK version.')
  AddCommonOptions(parser)


def AddMonkeyTestOptions(parser):
  """Adds monkey test options to |parser|."""

  group = parser.add_argument_group('Monkey Test Options')
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--event-count', default=10000, type=int,
      help='Number of events to generate (default: %(default)s).')
  group.add_argument(
      '--category', default='',
      help='A comma-separated list of allowed categories.')
  group.add_argument(
      '--throttle', default=100, type=int,
      help='Delay between events (ms) (default: %(default)s).')
  group.add_argument(
      '--seed', type=int,
      help=('Seed value for the pseudo-random generator. The same seed value '
            'generates the same sequence of events. Seed is randomized by '
            'default.'))
  group.add_argument(
      '--extra-args', default='',
      help=('String of other args to pass to the command verbatim.'))

  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def ProcessMonkeyTestOptions(args):
  """Processes all monkey test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A MonkeyOptions named tuple which contains all options relevant to
    monkey tests.
  """
  # TODO(jbudorick): Handle this directly in argparse with nargs='+'
  category = args.category
  if category:
    category = args.category.split(',')

  # TODO(jbudorick): Get rid of MonkeyOptions.
  return monkey_test_options.MonkeyOptions(
      args.verbose_count,
      args.package,
      args.event_count,
      category,
      args.throttle,
      args.seed,
      args.extra_args)


def AddUirobotTestOptions(parser):
  """Adds uirobot test options to |parser|."""
  group = parser.add_argument_group('Uirobot Test Options')

  group.add_argument('--app-under-test', required=True,
                     help='APK to run tests on.')
  group.add_argument(
      '--repeat', dest='repeat', type=int, default=0,
      help='Number of times to repeat the uirobot test.')
  group.add_argument(
      '--minutes', default=5, type=int,
      help='Number of minutes to run uirobot test [default: %(default)s].')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)


def AddPerfTestOptions(parser):
  """Adds perf test options to |parser|."""

  group = parser.add_argument_group('Perf Test Options')

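  # Argparse action that requires the positional single-step command and
  # --single-step to be passed together (or not at all).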
  class SingleStepAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
      if values and not namespace.single_step:
        parser.error('single step command provided, '
                     'but --single-step not specified.')
      elif namespace.single_step and not values:
        parser.error('--single-step specified, '
                     'but no single step command provided.')
      setattr(namespace, self.dest, values)

  step_group = group.add_mutually_exclusive_group(required=True)
  # TODO(jbudorick): Revise --single-step to use argparse.REMAINDER.
  # This requires removing "--" from client calls.
  step_group.add_argument(
      '--single-step', action='store_true',
      help='Execute the given command with retries, but only print the result '
           'for the "most successful" round.')
  step_group.add_argument(
      '--steps',
      help='JSON file containing the list of commands to run.')
  step_group.add_argument(
      '--print-step',
      help='The name of a previously executed perf step to print.')

  group.add_argument(
      '--output-json-list',
      help='Write a simple list of names from --steps into the given file.')
  group.add_argument(
      '--collect-chartjson-data',
      action='store_true',
      help='Cache the chartjson output from each step for later use.')
  group.add_argument(
      '--output-chartjson-data',
      default='',
      help='Write out chartjson into the given file.')
  group.add_argument(
      '--get-output-dir-archive', metavar='FILENAME',
      help='Write the cached output directory archived by a step into the'
           ' given ZIP file.')
  group.add_argument(
      '--flaky-steps',
      help=('A JSON file containing steps that are flaky '
            'and whose exit codes will be ignored.'))
  group.add_argument(
      '--no-timeout', action='store_true',
      help=('Do not impose a timeout. Each perf step is responsible for '
            'implementing the timeout logic.'))
  group.add_argument(
      '-f', '--test-filter',
      help=('Test filter (will match against the names listed in --steps).'))
  group.add_argument(
      '--dry-run', action='store_true',
      help='Just print the steps without executing.')
  # Uses 0.1 degrees C because that's what Android does.
  group.add_argument(
      '--max-battery-temp', type=int,
      help='Only start tests when the battery is at or below the given '
           'temperature (0.1 C).')
  group.add_argument('single_step_command', nargs='*', action=SingleStepAction,
                     help='If --single-step is specified, the command to run.')
  group.add_argument('--min-battery-level', type=int,
                     help='Only start tests when the battery is charged above '
                          'the given level.')
  group.add_argument('--known-devices-file', help='Path to known device list.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def ProcessPerfTestOptions(args):
  """Processes all perf test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A PerfOptions named tuple which contains all options relevant to
    perf tests.
  """
  # TODO(jbudorick): Move single_step handling down into the perf tests.
  if args.single_step:
    args.single_step = ' '.join(args.single_step_command)
  # TODO(jbudorick): Get rid of PerfOptions.
  return perf_test_options.PerfOptions(
      args.steps, args.flaky_steps, args.output_json_list,
      args.print_step, args.no_timeout, args.test_filter,
      args.dry_run, args.single_step, args.collect_chartjson_data,
      args.output_chartjson_data, args.get_output_dir_archive,
      args.max_battery_temp, args.min_battery_level,
      args.known_devices_file)


def AddPythonTestOptions(parser):
  group = parser.add_argument_group('Python Test Options')
  group.add_argument(
      '-s', '--suite', dest='suite_name', metavar='SUITE_NAME',
      choices=constants.PYTHON_UNIT_TEST_SUITES.keys(),
      help='Name of the test suite to run.')
  AddCommonOptions(parser)


def _RunLinkerTests(args, devices):
  """Subcommand of RunTestsCommand which runs linker tests."""
  runner_factory, tests = linker_setup.Setup(args, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=60,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Linker test',
      test_package='ChromiumLinkerTest')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile([results], args.json_results_file)

  return exit_code


def _RunJUnitTests(args):
  """Subcommand of RunTestsCommand which runs junit tests."""
  runner_factory, tests = junit_setup.Setup(args)
  results, exit_code = junit_dispatcher.RunTests(tests, runner_factory)

  report_results.LogFull(
      results=results,
      test_type='JUnit',
      test_package=args.test_suite)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile([results], args.json_results_file)

  return exit_code


def _RunMonkeyTests(args, devices):
  """Subcommand of RunTestsCommand which runs monkey tests."""
  monkey_options = ProcessMonkeyTestOptions(args)

  runner_factory, tests = monkey_setup.Setup(monkey_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile([results], args.json_results_file)

  return exit_code


def _RunPerfTests(args, active_devices):
  """Subcommand of RunTestsCommand which runs perf tests."""
  perf_options = ProcessPerfTestOptions(args)

  # Just save a simple json with a list of test names.
  if perf_options.output_json_list:
    return perf_test_runner.OutputJsonList(
        perf_options.steps, perf_options.output_json_list)

  # Just print the results from a single previously executed step.
  if perf_options.print_step:
    return perf_test_runner.PrintTestOutput(
        perf_options.print_step, perf_options.output_chartjson_data,
        perf_options.get_output_dir_archive)

  runner_factory, tests, devices = perf_setup.Setup(
      perf_options, active_devices)

  # shard=False means that each device gets the full list of tests and
  # then decides its own affinity.
  # shard=True means that each device pops the next available test from a
  # queue, which increases throughput but provides no affinity.
  results, _ = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Perf',
      test_package='Perf')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile([results], args.json_results_file)

  if perf_options.single_step:
    return perf_test_runner.PrintTestOutput('single_step')

  perf_test_runner.PrintSummary(tests)

  # Always return 0 on the sharding stage. Individual tests' exit codes
  # are returned on the print_step stage.
  return 0


def _RunPythonTests(args):
  """Subcommand of RunTestsCommand which runs python unit tests."""
  suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name]
  suite_path = suite_vars['path']
  suite_test_modules = suite_vars['test_modules']

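  # Temporarily prepend the suite's path so its test modules can be imported;
  # the finally block below restores sys.path.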
  sys.path = [suite_path] + sys.path
  try:
    suite = unittest.TestSuite()
    suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
                   for m in suite_test_modules)
    runner = unittest.TextTestRunner(verbosity=1+args.verbose_count)
    return 0 if runner.run(suite).wasSuccessful() else 1
  finally:
    sys.path = sys.path[1:]


def _GetAttachedDevices(blacklist_file, test_device, enable_cache, num_retries):
  """Get all attached devices.

  Args:
    blacklist_file: Path to device blacklist.
    test_device: Name of a specific device to use.
    enable_cache: Whether to enable checksum caching.
    num_retries: Default number of retries for device operations.

  Returns:
    A list of attached devices.
  """
  blacklist = (device_blacklist.Blacklist(blacklist_file)
               if blacklist_file
               else None)

  attached_devices = device_utils.DeviceUtils.HealthyDevices(
      blacklist, enable_device_files_cache=enable_cache,
      default_retries=num_retries)
  if test_device:
    matching_devices = [d for d in attached_devices if d == test_device]
    if not matching_devices:
      raise device_errors.DeviceUnreachableError(
          'Did not find device %s among attached devices: %s'
          % (test_device, ', '.join(str(d) for d in attached_devices)))
    return matching_devices
  else:
    if not attached_devices:
      raise device_errors.NoDevicesError()
    return sorted(attached_devices)


def RunTestsCommand(args): # pylint: disable=too-many-return-statements
  """Checks test type and dispatches to the appropriate function.

  Args:
    args: argparse.Namespace object.

  Returns:
    Integer indicating exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
        individual test runner.
  """
  command = args.command

  ProcessCommonOptions(args)
  logging.info('command: %s', ' '.join(sys.argv))

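  # gtest and instrumentation tests are always run in platform mode.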
  if args.enable_platform_mode or command in ('gtest', 'instrumentation'):
    return RunTestsInPlatformMode(args)

  forwarder.Forwarder.RemoveHostLog()
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  def get_devices():
    return _GetAttachedDevices(args.blacklist_file, args.test_device,
                               args.enable_device_cache, args.num_retries)

  if command == 'linker':
    return _RunLinkerTests(args, get_devices())
  elif command == 'junit':
    return _RunJUnitTests(args)
  elif command == 'monkey':
    return _RunMonkeyTests(args, get_devices())
  elif command == 'perf':
    return _RunPerfTests(args, get_devices())
  elif command == 'python':
    return _RunPythonTests(args)
  else:
    raise Exception('Unknown test type.')


_SUPPORTED_IN_PLATFORM_MODE = [
  # TODO(jbudorick): Add support for more test types.
  'gtest',
  'instrumentation',
  'uirobot',
]


def RunTestsInPlatformMode(args):
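  """Runs tests in platform mode and returns the overall exit code."""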

  def infra_error(message):
    logging.fatal(message)
    sys.exit(constants.INFRA_EXIT_CODE)

  if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
    infra_error('%s is not yet supported in platform mode' % args.command)

  with environment_factory.CreateEnvironment(args, infra_error) as env:
    with test_instance_factory.CreateTestInstance(args, infra_error) as test:
      with test_run_factory.CreateTestRun(
          args, env, test, infra_error) as test_run:
        results = []
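        # A negative --repeat value means repeat indefinitely.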
        repetitions = (xrange(args.repeat + 1) if args.repeat >= 0
                       else itertools.count())
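        # Maps test name -> result type -> count, aggregated over iterations.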
        result_counts = collections.defaultdict(
            lambda: collections.defaultdict(int))
        iteration_count = 0
        for _ in repetitions:
          iteration_results = test_run.RunTests()
          if iteration_results is not None:
            iteration_count += 1
            results.append(iteration_results)
            for r in iteration_results.GetAll():
              result_counts[r.GetName()][r.GetType()] += 1
            report_results.LogFull(
                results=iteration_results,
                test_type=test.TestType(),
                test_package=test_run.TestPackage(),
                annotation=getattr(args, 'annotations', None),
                flakiness_server=getattr(args, 'flakiness_dashboard_server',
                                         None))
            if args.break_on_failure and not iteration_results.DidRunPass():
              break

        if iteration_count > 1:
          # Display summary results, but only show per-result counts for a
          # test if at least one of its runs did not pass.
          all_pass = 0
          tot_tests = 0
          for test_name in result_counts:
            tot_tests += 1
            if any(result_counts[test_name][x] for x in (
                base_test_result.ResultType.FAIL,
                base_test_result.ResultType.CRASH,
                base_test_result.ResultType.TIMEOUT,
                base_test_result.ResultType.UNKNOWN)):
              logging.critical(
                  '%s: %s',
                  test_name,
                  ', '.join('%s %s' % (str(result_counts[test_name][i]), i)
                            for i in base_test_result.ResultType.GetTypes()))
            else:
              all_pass += 1

          logging.critical('%s of %s tests passed in all %s runs',
                           str(all_pass),
                           str(tot_tests),
                           str(iteration_count))

        if args.json_results_file:
          json_results.GenerateJsonResultsFile(
              results, args.json_results_file)

  return (0 if all(r.DidRunPass() for r in results)
          else constants.ERROR_EXIT_CODE)


CommandConfigTuple = collections.namedtuple(
    'CommandConfigTuple',
    ['add_options_func', 'help_txt'])
VALID_COMMANDS = {
    'gtest': CommandConfigTuple(
        AddGTestOptions,
        'googletest-based C++ tests'),
    'instrumentation': CommandConfigTuple(
        AddInstrumentationTestOptions,
        'InstrumentationTestCase-based Java tests'),
    'junit': CommandConfigTuple(
        AddJUnitTestOptions,
        'JUnit4-based Java tests'),
    'monkey': CommandConfigTuple(
        AddMonkeyTestOptions,
        "Tests based on Android's monkey"),
    'perf': CommandConfigTuple(
        AddPerfTestOptions,
        'Performance tests'),
    'python': CommandConfigTuple(
        AddPythonTestOptions,
        'Python tests based on unittest.TestCase'),
    'linker': CommandConfigTuple(
        AddLinkerTestOptions,
        'Linker tests'),
    'uirobot': CommandConfigTuple(
        AddUirobotTestOptions,
        'Uirobot test'),
}


def DumpThreadStacks(_signal, _frame):
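  """Logs the stack of every live thread; registered for SIGUSR1 in main()."""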
  for thread in threading.enumerate():
    reraiser_thread.LogThreadStack(thread)


def main():
  signal.signal(signal.SIGUSR1, DumpThreadStacks)

  parser = argparse.ArgumentParser()
  command_parsers = parser.add_subparsers(title='test types',
                                          dest='command')

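  # Register one subparser per test type; each command wires up its own flags.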
  for test_type, config in sorted(VALID_COMMANDS.iteritems(),
                                  key=lambda x: x[0]):
    subparser = command_parsers.add_parser(
        test_type, usage='%(prog)s [options]', help=config.help_txt)
    config.add_options_func(subparser)

  args = parser.parse_args()

  try:
    return RunTestsCommand(args)
  except base_error.BaseError as e:
    logging.exception('Error occurred.')
    if e.is_infra_error:
      return constants.INFRA_EXIT_CODE
    return constants.ERROR_EXIT_CODE
  except: # pylint: disable=W0702
    logging.exception('Unrecognized error occurred.')
    return constants.ERROR_EXIT_CODE


if __name__ == '__main__':
  sys.exit(main())
    973