# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Class for running instrumentation tests on a single device."""

import logging
import os
import re
import sys
import time

from pylib import constants
from pylib import flag_changer
from pylib import valgrind_tools
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.device import device_errors
from pylib.instrumentation import json_perf_parser
from pylib.instrumentation import test_result

sys.path.append(os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'util', 'lib',
                             'common'))
import perf_tests_results_helper # pylint: disable=F0401


_PERF_TEST_ANNOTATION = 'PerfTest'


def _GetDataFilesForTestSuite(suite_basename):
  """Returns a list of data files/dirs needed by the test suite.

  Args:
    suite_basename: The test suite basename for which to return file paths.

  Returns:
    A list of test file and directory paths.
  """
  test_files = []
  if suite_basename in ['ChromeTest', 'ContentShellTest']:
    test_files += [
        'net/data/ssl/certificates/',
    ]
  return test_files


class TestRunner(base_test_runner.BaseTestRunner):
  """Responsible for running a series of tests connected to a single device."""

  _DEVICE_DATA_DIR = 'chrome/test/data'
  _DEVICE_COVERAGE_DIR = 'chrome/test/coverage'
  _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile'
  _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR +
                                       '/chrome-profile*')
  _DEVICE_HAS_TEST_FILES = {}

  def __init__(self, test_options, device, shard_index, test_pkg,
               additional_flags=None):
    """Create a new TestRunner.

    Args:
      test_options: An InstrumentationOptions object.
      device: Attached android device.
      shard_index: Shard index.
      test_pkg: A TestPackage object.
      additional_flags: A list of additional flags to add to the command line.
    """
    super(TestRunner, self).__init__(device, test_options.tool,
                                     test_options.push_deps,
                                     test_options.cleanup_test_files)
    self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index

    self.coverage_device_file = None
    self.coverage_dir = test_options.coverage_dir
    self.coverage_host_file = None
    self.options = test_options
    self.test_pkg = test_pkg
    # Use the correct command line file for the package under test.
    cmdline_file = [a.cmdline_file for a in constants.PACKAGE_INFO.itervalues()
                    if a.test_package == self.test_pkg.GetPackageName()]
    assert len(cmdline_file) < 2, 'Multiple packages have the same test package'
    if cmdline_file and cmdline_file[0]:
      self.flags = flag_changer.FlagChanger(self.device, cmdline_file[0])
      if additional_flags:
        self.flags.AddFlags(additional_flags)
    else:
      self.flags = None

  #override
  def InstallTestPackage(self):
    self.test_pkg.Install(self.device)

  #override
  def PushDataDeps(self):
    # TODO(frankf): Implement a general approach for copying/installing
    # once across test runners.
    if TestRunner._DEVICE_HAS_TEST_FILES.get(str(self.device), False):
      logging.warning('Already copied test files to device %s, skipping.',
                      str(self.device))
      return

    test_data = _GetDataFilesForTestSuite(self.test_pkg.GetApkName())
    if test_data:
      # Make sure the SD card is ready.
      self.device.WaitUntilFullyBooted(timeout=20)
      for p in test_data:
        self.device.PushChangedFiles(
            os.path.join(constants.DIR_SOURCE_ROOT, p),
            os.path.join(self.device.GetExternalStoragePath(), p))

    # TODO(frankf): Specify test data in this file as opposed to passing it
    # on the command line.
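    # Each entry in |self.options.test_data| is a 'device:host' pair, e.g.
    # (hypothetical) 'base:base/test/data', which would push
    # <source root>/base/test/data to <external storage>/chrome/test/data/base.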
    for dest_host_pair in self.options.test_data:
      dst_src = dest_host_pair.split(':', 1)
      dst_layer = dst_src[0]
      host_src = dst_src[1]
      host_test_files_path = os.path.join(constants.DIR_SOURCE_ROOT,
                                          host_src)
      if os.path.exists(host_test_files_path):
        self.device.PushChangedFiles(
            host_test_files_path,
            '%s/%s/%s' % (
                self.device.GetExternalStoragePath(),
                TestRunner._DEVICE_DATA_DIR,
                dst_layer))
    self.tool.CopyFiles()
    TestRunner._DEVICE_HAS_TEST_FILES[str(self.device)] = True

  def _GetInstrumentationArgs(self):
    ret = {}
    if self.options.wait_for_debugger:
      ret['debug'] = 'true'
    if self.coverage_dir:
      ret['coverage'] = 'true'
      ret['coverageFile'] = self.coverage_device_file

    return ret

  def _TakeScreenshot(self, test):
    """Takes a screenshot from the device."""
    screenshot_name = os.path.join(constants.SCREENSHOTS_DIR, '%s.png' % test)
    logging.info('Taking screenshot named %s', screenshot_name)
    self.device.TakeScreenshot(screenshot_name)

  def SetUp(self):
    """Sets up the test harness and device before all tests are run."""
    super(TestRunner, self).SetUp()
    if not self.device.HasRoot():
      logging.warning('Unable to enable java asserts for %s, non-rooted device',
                      str(self.device))
    else:
      if self.device.SetJavaAsserts(True):
        # TODO(jbudorick) How to best do shell restart after the
        #                 android_commands refactor?
        self.device.RunShellCommand('stop')
        self.device.RunShellCommand('start')

    # Use a shard-specific port for the HTTP server: multiple processes
    # launching lighttpd on the same port at the same time would race.
    self.LaunchTestHttpServer(
        os.path.join(constants.DIR_SOURCE_ROOT), self._lighttp_port)
    if self.flags:
      self.flags.AddFlags(['--disable-fre', '--enable-test-intents'])
      if self.options.device_flags:
        with open(self.options.device_flags) as device_flags_file:
          stripped_flags = (l.strip() for l in device_flags_file)
          self.flags.AddFlags([flag for flag in stripped_flags if flag])

  def TearDown(self):
    """Cleans up the test harness and saves outstanding data from test run."""
    if self.flags:
      self.flags.Restore()
    super(TestRunner, self).TearDown()

  def TestSetup(self, test):
    """Sets up the test harness for running a particular test.

    Args:
      test: The name of the test that will be run.
    """
    self.SetupPerfMonitoringIfNeeded(test)
    self._SetupIndividualTestTimeoutScale(test)
    self.tool.SetupEnvironment()

    # Make sure the forwarder is still running.
    self._RestartHttpServerForwarderIfNecessary()

    if self.coverage_dir:
      coverage_basename = '%s.ec' % test
      self.coverage_device_file = '%s/%s/%s' % (
          self.device.GetExternalStoragePath(),
          TestRunner._DEVICE_COVERAGE_DIR, coverage_basename)
      self.coverage_host_file = os.path.join(
          self.coverage_dir, coverage_basename)

  def _IsPerfTest(self, test):
    """Determines whether a test is a performance test.

    Args:
      test: The name of the test to be checked.

    Returns:
      Whether the test is annotated as a performance test.
    """
    return _PERF_TEST_ANNOTATION in self.test_pkg.GetTestAnnotations(test)

  def SetupPerfMonitoringIfNeeded(self, test):
    """Sets up performance monitoring if the specified test requires it.

    Args:
      test: The name of the test to be run.
    """
    if not self._IsPerfTest(test):
      return
    self.device.old_interface.Adb().SendCommand(
        'shell rm ' + TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX)
    self.device.old_interface.StartMonitoringLogcat()

  def TestTeardown(self, test, result):
    """Cleans up the test harness after running a particular test.

    Depending on the options of this TestRunner, this might handle performance
    tracking. This method will only be called if the test passed.

    Args:
      test: The name of the test that was just run.
      result: The result for this test.
    """

    self.tool.CleanUpEnvironment()

    # The logic below relies on the test passing.
    if not result or not result.DidRunPass():
      return

    self.TearDownPerfMonitoring(test)

    if self.coverage_dir:
      self.device.PullFile(
          self.coverage_device_file, self.coverage_host_file)
      self.device.RunShellCommand(
          'rm -f %s' % self.coverage_device_file)

  def TearDownPerfMonitoring(self, test):
    """Cleans up performance monitoring if the specified test required it.

    Args:
      test: The name of the test that was just run.
    Raises:
      Exception: if there's anything wrong with the perf data.
    """
    if not self._IsPerfTest(test):
      return
    raw_test_name = test.split('#')[1]

    # Wait for and grab the annotation data so we can figure out which traces
    # to parse.
    regex = self.device.old_interface.WaitForLogMatch(
        re.compile(r'\*\*PERFANNOTATION\(' + raw_test_name + r'\):(.*)'), None)

    # If the test is set to run on a specific device type only (e.g. tablet
    # only or phone only) and is being run on the wrong device, the test
    # just quits and does not do anything. The java test harness will still
    # print the appropriate annotation for us, but will add --NORUN-- so we
    # know to ignore the results.
    # The --NORUN-- tag is managed by MainActivityTestBase.java.
    if regex.group(1) != '--NORUN--':

      # Obtain the relevant perf data. The data is dumped to a
      # JSON formatted file.
      json_string = self.device.ReadFile(
          '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt',
          as_root=True)

      if json_string:
        json_string = '\n'.join(json_string)
      else:
        raise Exception('Perf file does not exist or is empty')

      if self.options.save_perf_json:
        json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
        with open(json_local_file, 'w') as f:
          f.write(json_string)
        logging.info('Saving Perf UI JSON from test %s to %s',
                     test, json_local_file)

      raw_perf_data = regex.group(1).split(';')

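      # Each annotation set is assumed to be a comma-separated triple of the
      # form 'perfKey,measurementName,traceName' (illustrative naming; the
      # exact semantics are defined by the producing java test harness).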
      for raw_perf_set in raw_perf_data:
        if raw_perf_set:
          perf_set = raw_perf_set.split(',')
          if len(perf_set) != 3:
            raise Exception('Unexpected number of tokens in perf annotation '
                            'string: ' + raw_perf_set)

          # Process the performance data.
          result = json_perf_parser.GetAverageRunInfoFromJSONString(json_string,
                                                                    perf_set[0])
          perf_tests_results_helper.PrintPerfResult(perf_set[1], perf_set[2],
                                                    [result['average']],
                                                    result['units'])

  def _SetupIndividualTestTimeoutScale(self, test):
    timeout_scale = self._GetIndividualTestTimeoutScale(test)
    valgrind_tools.SetChromeTimeoutScale(self.device, timeout_scale)

  def _GetIndividualTestTimeoutScale(self, test):
    """Returns the timeout scale for the given |test|."""
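    # For example, an annotation like 'TimeoutScale:4' (illustrative) scales
    # the base timeout by 4; --wait-for-debugger additionally multiplies the
    # scale by 100 so the debugger has time to attach.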
    annotations = self.test_pkg.GetTestAnnotations(test)
    timeout_scale = 1
    if 'TimeoutScale' in annotations:
      for annotation in annotations:
        scale_match = re.match('TimeoutScale:([0-9]+)', annotation)
        if scale_match:
          timeout_scale = int(scale_match.group(1))
    if self.options.wait_for_debugger:
      timeout_scale *= 100
    return timeout_scale

  def _GetIndividualTestTimeoutSecs(self, test):
    """Returns the timeout in seconds for the given |test|."""
    annotations = self.test_pkg.GetTestAnnotations(test)
    if 'Manual' in annotations:
      return 10 * 60 * 60
    if 'IntegrationTest' in annotations:
      return 30 * 60
    if 'External' in annotations:
      return 10 * 60
    if 'EnormousTest' in annotations:
      return 10 * 60
    if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
      return 5 * 60
    if 'MediumTest' in annotations:
      return 3 * 60
    if 'SmallTest' in annotations:
      return 1 * 60

    logging.warning("Test size not found in annotations for test '%s', "
                    "using 1 minute for timeout.", test)
    return 1 * 60

  def _RunTest(self, test, timeout):
    """Runs a single instrumentation test.

    Args:
      test: Test class/method.
      timeout: Timeout time in seconds.

    Returns:
      The raw output of am instrument as a list of lines.
    """
    # Build the 'am instrument' command.
    instrumentation_path = (
        '%s/%s' % (self.test_pkg.GetPackageName(), self.options.test_runner))

    cmd = ['am', 'instrument', '-r']
    for k, v in self._GetInstrumentationArgs().iteritems():
      cmd.extend(['-e', k, "'%s'" % v])
    cmd.extend(['-e', 'class', "'%s'" % test])
    cmd.extend(['-w', instrumentation_path])
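    # At this point the command resembles (hypothetical names):
    #   am instrument -r -e class 'org.chromium.Foo#testBar' \
    #       -w org.chromium.foo.test/org.chromium.FooTestRunner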
    return self.device.RunShellCommand(cmd, timeout=timeout, retries=0)

  @staticmethod
  def _ParseAmInstrumentRawOutput(raw_output):
    """Parses the output of an |am instrument -r| call.

    Args:
      raw_output: The output of an |am instrument -r| call as a list of lines.
    Returns:
      A 3-tuple containing:
        - the instrumentation code as an integer
        - the instrumentation result as a list of lines
        - the instrumentation statuses received as a list of 2-tuples
          containing:
          - the status code as an integer
          - the bundle dump as a dict mapping string keys to a list of
            strings, one for each line.
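
    Example (illustrative):
      The raw output
        ['INSTRUMENTATION_STATUS: test=testFoo',
         'INSTRUMENTATION_STATUS_CODE: 0',
         'INSTRUMENTATION_CODE: -1']
      parses to
        (-1, [], [(0, {'test': ['testFoo']})]).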
    """
    INSTR_STATUS = 'INSTRUMENTATION_STATUS: '
    INSTR_STATUS_CODE = 'INSTRUMENTATION_STATUS_CODE: '
    INSTR_RESULT = 'INSTRUMENTATION_RESULT: '
    INSTR_CODE = 'INSTRUMENTATION_CODE: '

    last = None
    last_key = None
    instr_code = None
    instr_result = []
    instr_statuses = []
    bundle = {}
    for line in raw_output:
      if line.startswith(INSTR_STATUS):
        instr_var = line[len(INSTR_STATUS):]
        if '=' in instr_var:
          k, v = instr_var.split('=', 1)
          bundle[k] = [v]
          last = INSTR_STATUS
          last_key = k
        else:
          logging.debug('Unknown "%s" line: %s', INSTR_STATUS, line)

      elif line.startswith(INSTR_STATUS_CODE):
        instr_status = line[len(INSTR_STATUS_CODE):]
        instr_statuses.append((int(instr_status), bundle))
        bundle = {}
        last = INSTR_STATUS_CODE

      elif line.startswith(INSTR_RESULT):
        instr_result.append(line[len(INSTR_RESULT):])
        last = INSTR_RESULT

      elif line.startswith(INSTR_CODE):
        instr_code = int(line[len(INSTR_CODE):])
        last = INSTR_CODE

      elif last == INSTR_STATUS:
        bundle[last_key].append(line)

      elif last == INSTR_RESULT:
        instr_result.append(line)

    return (instr_code, instr_result, instr_statuses)

  def _GenerateTestResult(self, test, instr_statuses, start_ms, duration_ms):
    """Generates the result of |test| from |instr_statuses|.

    Args:
      test: The full name of the test that was run.
      instr_statuses: A list of 2-tuples containing:
        - the status code as an integer
        - the bundle dump as a dict mapping string keys to a list of strings
        Note that this is the same as the third item in the 3-tuple returned by
        |_ParseAmInstrumentRawOutput|.
      start_ms: The start time of the test in milliseconds.
      duration_ms: The duration of the test in milliseconds.
    Returns:
      An InstrumentationTestResult object.
    """
    INSTR_STATUS_CODE_START = 1
    INSTR_STATUS_CODE_OK = 0
    INSTR_STATUS_CODE_ERROR = -1
    INSTR_STATUS_CODE_FAIL = -2
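    # Per Android's instrumentation reporting convention, a passing test
    # typically emits status 1 (start) followed by 0 (OK); failures report -2
    # and errors -1, usually with any stack trace under the 'stack' bundle key.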

    log = ''
    result_type = base_test_result.ResultType.UNKNOWN

    for status_code, bundle in instr_statuses:
      if status_code == INSTR_STATUS_CODE_START:
        pass
      elif status_code == INSTR_STATUS_CODE_OK:
        bundle_test = '%s#%s' % (
            ''.join(bundle.get('class', [''])),
            ''.join(bundle.get('test', [''])))
        skipped = ''.join(bundle.get('test_skipped', ['']))

        if (test == bundle_test and
            result_type == base_test_result.ResultType.UNKNOWN):
          result_type = base_test_result.ResultType.PASS
        elif skipped.lower() in ('true', '1', 'yes'):
          result_type = base_test_result.ResultType.SKIP
          logging.info('Skipped %s', test)
      else:
        if status_code not in (INSTR_STATUS_CODE_ERROR,
                               INSTR_STATUS_CODE_FAIL):
          logging.info('Unrecognized status code %d. Handling as an error.',
                       status_code)
        result_type = base_test_result.ResultType.FAIL
        if 'stack' in bundle:
          log = '\n'.join(bundle['stack'])
        # Dismiss any error dialogs. Limit the number in case we have an error
        # loop or we are failing to dismiss.
        for _ in xrange(10):
          package = self.device.old_interface.DismissCrashDialogIfNeeded()
          if not package:
            break
          # Assume test package convention of ".test" suffix.
          if package in self.test_pkg.GetPackageName():
            result_type = base_test_result.ResultType.CRASH
            break

    return test_result.InstrumentationTestResult(
        test, result_type, start_ms, duration_ms, log=log)

  #override
  def RunTest(self, test):
    results = base_test_result.TestRunResults()
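    # The effective timeout is the base timeout for the test's size annotation,
    # scaled by any TimeoutScale annotation and by the active tool (e.g.
    # Valgrind-style tools lengthen timeouts).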
    timeout = (self._GetIndividualTestTimeoutSecs(test) *
               self._GetIndividualTestTimeoutScale(test) *
               self.tool.GetTimeoutScale())

    start_ms = 0
    duration_ms = 0
    try:
      self.TestSetup(test)

      time_ms = lambda: int(time.time() * 1000)
      start_ms = time_ms()
      raw_output = self._RunTest(test, timeout)
      duration_ms = time_ms() - start_ms

      # Parse the test output.
      _, _, statuses = self._ParseAmInstrumentRawOutput(raw_output)
      result = self._GenerateTestResult(test, statuses, start_ms, duration_ms)
      results.AddResult(result)
    except device_errors.CommandTimeoutError as e:
      results.AddResult(test_result.InstrumentationTestResult(
          test, base_test_result.ResultType.TIMEOUT, start_ms, duration_ms,
          log=str(e) or 'No information'))
    except device_errors.DeviceUnreachableError as e:
      results.AddResult(test_result.InstrumentationTestResult(
          test, base_test_result.ResultType.CRASH, start_ms, duration_ms,
          log=str(e) or 'No information'))
    self.TestTeardown(test, results)
    return (results, None if results.DidRunPass() else test)