# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Class for running instrumentation tests on a single device."""

import logging
import os
import re
import sys
import time

from pylib import android_commands
from pylib import constants
from pylib import flag_changer
from pylib import valgrind_tools
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.device import device_errors
from pylib.instrumentation import json_perf_parser
from pylib.instrumentation import test_result

sys.path.append(os.path.join(sys.path[0],
                             os.pardir, os.pardir, 'build', 'util', 'lib',
                             'common'))
import perf_tests_results_helper # pylint: disable=F0401


_PERF_TEST_ANNOTATION = 'PerfTest'


def _GetDataFilesForTestSuite(suite_basename):
  """Returns a list of data files/dirs needed by the test suite.

  Args:
    suite_basename: The test suite basename for which to return file paths.

  Returns:
    A list of test file and directory paths.
  """
  test_files = []
  if suite_basename in ['ChromeTest', 'ContentShellTest']:
    test_files += [
        'net/data/ssl/certificates/',
    ]
  return test_files


class TestRunner(base_test_runner.BaseTestRunner):
  """Responsible for running a series of tests connected to a single device."""

  _DEVICE_DATA_DIR = 'chrome/test/data'
  _DEVICE_COVERAGE_DIR = 'chrome/test/coverage'
  _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile'
  _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR +
                                       '/chrome-profile*')
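  # Maps device serials to whether test files have already been pushed,
  # shared across all TestRunner instances in this process.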
  _DEVICE_HAS_TEST_FILES = {}

  def __init__(self, test_options, device, shard_index, test_pkg,
               additional_flags=None):
    """Create a new TestRunner.

    Args:
      test_options: An InstrumentationOptions object.
      device: The attached Android device.
      shard_index: Shard index.
      test_pkg: A TestPackage object.
      additional_flags: A list of additional flags to add to the command line.
    """
    super(TestRunner, self).__init__(device, test_options.tool,
                                     test_options.push_deps,
                                     test_options.cleanup_test_files)
    self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index

    self.coverage_device_file = None
    self.coverage_dir = test_options.coverage_dir
    self.coverage_host_file = None
    self.options = test_options
    self.test_pkg = test_pkg
    # Use the correct command line file for the package under test.
    cmdline_file = [a.cmdline_file for a in constants.PACKAGE_INFO.itervalues()
                    if a.test_package == self.test_pkg.GetPackageName()]
    assert len(cmdline_file) < 2, 'Multiple packages have the same test package'
    if cmdline_file and cmdline_file[0]:
      self.flags = flag_changer.FlagChanger(self.device, cmdline_file[0])
      if additional_flags:
        self.flags.AddFlags(additional_flags)
    else:
      self.flags = None

  #override
  def InstallTestPackage(self):
    self.test_pkg.Install(self.device)

  #override
  def PushDataDeps(self):
    # TODO(frankf): Implement a general approach for copying/installing
    # once across test runners.
    device_serial = self.device.old_interface.GetDevice()
    if TestRunner._DEVICE_HAS_TEST_FILES.get(device_serial, False):
      logging.warning('Already copied test files to device %s, skipping.',
                      device_serial)
      return

    test_data = _GetDataFilesForTestSuite(self.test_pkg.GetApkName())
    if test_data:
      # Make sure SD card is ready.
      self.device.WaitUntilFullyBooted(timeout=20)
      for p in test_data:
        self.device.old_interface.PushIfNeeded(
            os.path.join(constants.DIR_SOURCE_ROOT, p),
            os.path.join(self.device.GetExternalStoragePath(), p))

    # TODO(frankf): Specify test data in this file rather than passing it on
    # the command line.
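    # Each entry in |test_data| is '<device_subdir>:<host_path>', where the
    # host path is relative to the source root and the device subdir is
    # relative to _DEVICE_DATA_DIR on external storage.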
    for dest_host_pair in self.options.test_data:
      dst_src = dest_host_pair.split(':', 1)
      dst_layer = dst_src[0]
      host_src = dst_src[1]
      host_test_files_path = '%s/%s' % (constants.DIR_SOURCE_ROOT, host_src)
      if os.path.exists(host_test_files_path):
        self.device.old_interface.PushIfNeeded(
            host_test_files_path,
            '%s/%s/%s' % (
                self.device.GetExternalStoragePath(),
                TestRunner._DEVICE_DATA_DIR,
                dst_layer))
    self.tool.CopyFiles()
    TestRunner._DEVICE_HAS_TEST_FILES[device_serial] = True

  def _GetInstrumentationArgs(self):
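    """Returns a dict of instrumentation arguments for this test run."""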
    ret = {}
    if self.options.wait_for_debugger:
      ret['debug'] = 'true'
    if self.coverage_dir:
      ret['coverage'] = 'true'
      ret['coverageFile'] = self.coverage_device_file

    return ret

  def _TakeScreenshot(self, test):
    """Takes a screenshot from the device."""
    screenshot_name = os.path.join(constants.SCREENSHOTS_DIR, '%s.png' % test)
    logging.info('Taking screenshot named %s', screenshot_name)
    self.device.old_interface.TakeScreenshot(screenshot_name)

  def SetUp(self):
    """Sets up the test harness and device before all tests are run."""
    super(TestRunner, self).SetUp()
    if not self.device.HasRoot():
      logging.warning('Unable to enable Java asserts for %s: device is not '
                      'rooted.', str(self.device))
    else:
      if self.device.old_interface.SetJavaAssertsEnabled(True):
        # TODO(jbudorick) How to best do shell restart after the
        #                 android_commands refactor?
        self.device.RunShellCommand('stop')
        self.device.RunShellCommand('start')

    # Launch the HTTP server on a shard-specific port; multiple processes
    # trying to launch lighttpd on the same port at the same time would race.
    self.LaunchTestHttpServer(
        os.path.join(constants.DIR_SOURCE_ROOT), self._lighttp_port)
    if self.flags:
      self.flags.AddFlags(['--disable-fre', '--enable-test-intents'])

  def TearDown(self):
    """Cleans up the test harness and saves outstanding data from test run."""
    if self.flags:
      self.flags.Restore()
    super(TestRunner, self).TearDown()

  def TestSetup(self, test):
    """Sets up the test harness for running a particular test.

    Args:
      test: The name of the test that will be run.
    """
    self.SetupPerfMonitoringIfNeeded(test)
    self._SetupIndividualTestTimeoutScale(test)
    self.tool.SetupEnvironment()

    # Make sure the forwarder is still running.
    self._RestartHttpServerForwarderIfNecessary()

    if self.coverage_dir:
      coverage_basename = '%s.ec' % test
      self.coverage_device_file = '%s/%s/%s' % (
          self.device.GetExternalStoragePath(),
          TestRunner._DEVICE_COVERAGE_DIR, coverage_basename)
      self.coverage_host_file = os.path.join(
          self.coverage_dir, coverage_basename)

  def _IsPerfTest(self, test):
    """Determines whether a test is a performance test.

    Args:
      test: The name of the test to be checked.

    Returns:
      Whether the test is annotated as a performance test.
    """
    return _PERF_TEST_ANNOTATION in self.test_pkg.GetTestAnnotations(test)

  def SetupPerfMonitoringIfNeeded(self, test):
    """Sets up performance monitoring if the specified test requires it.

    Args:
      test: The name of the test to be run.
    """
    if not self._IsPerfTest(test):
      return
    self.device.old_interface.Adb().SendCommand(
        'shell rm ' + TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX)
    self.device.old_interface.StartMonitoringLogcat()

  def TestTeardown(self, test, raw_result):
    """Cleans up the test harness after running a particular test.

    Depending on this TestRunner's options, this may handle performance
    tracking. This method is only called if the test passed.

    Args:
      test: The name of the test that was just run.
      raw_result: The result for this test.
    """

    self.tool.CleanUpEnvironment()

    # The logic below relies on the test passing.
    if not raw_result or raw_result.GetStatusCode():
      return

    self.TearDownPerfMonitoring(test)

    if self.coverage_dir:
      self.device.old_interface.Adb().Pull(
          self.coverage_device_file, self.coverage_host_file)
      self.device.RunShellCommand(
          'rm -f %s' % self.coverage_device_file)

  def TearDownPerfMonitoring(self, test):
    """Cleans up performance monitoring if the specified test required it.

    Args:
      test: The name of the test that was just run.
    Raises:
      Exception: if there's anything wrong with the perf data.
    """
    if not self._IsPerfTest(test):
      return
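    # Instrumentation test names follow the 'TestClass#testMethod' convention;
    # the log annotation is keyed by the bare method name.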
    raw_test_name = test.split('#')[1]

    # Wait for and capture the annotation data so we know which traces to
    # parse.
    regex = self.device.old_interface.WaitForLogMatch(
        re.compile(r'\*\*PERFANNOTATION\(' + raw_test_name + r'\):(.*)'), None)

    # If the test is restricted to one device type (e.g. tablet-only or
    # phone-only) and it is run on the wrong kind of device, it quits without
    # doing anything. The Java test harness still prints the annotation, but
    # adds a --NORUN-- tag so we know to ignore the results.
    # The --NORUN-- tag is managed by MainActivityTestBase.java.
    if regex.group(1) != '--NORUN--':

      # Obtain the relevant perf data. The data is dumped to a
      # JSON-formatted file.
      json_string = self.device.old_interface.GetProtectedFileContents(
          '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt')

      if json_string:
        json_string = '\n'.join(json_string)
      else:
        raise Exception('Perf file does not exist or is empty')

      if self.options.save_perf_json:
        json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
        with open(json_local_file, 'w') as f:
          f.write(json_string)
        logging.info('Saving Perf UI JSON from test %s to %s',
                     test, json_local_file)

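      # The annotation payload is a semicolon-separated list of comma-separated
      # triples: the first token names the run to look up in the JSON dump,
      # and the remaining two label the result reported via PrintPerfResult.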
      raw_perf_data = regex.group(1).split(';')

      for raw_perf_set in raw_perf_data:
        if raw_perf_set:
          perf_set = raw_perf_set.split(',')
          if len(perf_set) != 3:
            raise Exception('Unexpected number of tokens in perf annotation '
                            'string: ' + raw_perf_set)

          # Process the performance data.
          result = json_perf_parser.GetAverageRunInfoFromJSONString(json_string,
                                                                    perf_set[0])
          perf_tests_results_helper.PrintPerfResult(perf_set[1], perf_set[2],
                                                    [result['average']],
                                                    result['units'])

  def _SetupIndividualTestTimeoutScale(self, test):
    timeout_scale = self._GetIndividualTestTimeoutScale(test)
    valgrind_tools.SetChromeTimeoutScale(self.device, timeout_scale)

  def _GetIndividualTestTimeoutScale(self, test):
    """Returns the timeout scale for the given |test|."""
    annotations = self.test_pkg.GetTestAnnotations(test)
    timeout_scale = 1
    if 'TimeoutScale' in annotations:
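      # The scale factor is carried in a companion annotation of the form
      # 'TimeoutScale:<N>'.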
      for annotation in annotations:
        scale_match = re.match('TimeoutScale:([0-9]+)', annotation)
        if scale_match:
          timeout_scale = int(scale_match.group(1))
    if self.options.wait_for_debugger:
      timeout_scale *= 100
    return timeout_scale

  def _GetIndividualTestTimeoutSecs(self, test):
    """Returns the timeout in seconds for the given |test|."""
    annotations = self.test_pkg.GetTestAnnotations(test)
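    # 'Manual' tests are effectively unbounded (10 hours); other sizes step
    # down from 10 minutes to the 1-minute default.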
    if 'Manual' in annotations:
      return 600 * 60
    if 'External' in annotations:
      return 10 * 60
    if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
      return 5 * 60
    if 'MediumTest' in annotations:
      return 3 * 60
    return 1 * 60

  def _RunTest(self, test, timeout):
    try:
      return self.device.old_interface.RunInstrumentationTest(
          test, self.test_pkg.GetPackageName(),
          self._GetInstrumentationArgs(), timeout)
    except (device_errors.CommandTimeoutError,
            # TODO(jbudorick) Remove this once the underlying implementations
            #                 for the above are switched or wrapped.
            android_commands.errors.WaitForResponseTimedOutError):
      logging.info('Ran the test with timeout of %ds.', timeout)
      raise

  #override
  def RunTest(self, test):
    raw_result = None
    start_date_ms = None
    results = base_test_result.TestRunResults()
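    # The effective timeout is the per-size base timeout scaled by any
    # TimeoutScale annotation and by the active tool (e.g. Valgrind).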
    timeout = (self._GetIndividualTestTimeoutSecs(test) *
               self._GetIndividualTestTimeoutScale(test) *
               self.tool.GetTimeoutScale())
    try:
      self.TestSetup(test)
      start_date_ms = int(time.time()) * 1000
      raw_result = self._RunTest(test, timeout)
      duration_ms = int(time.time()) * 1000 - start_date_ms
      status_code = raw_result.GetStatusCode()
      if status_code:
        if self.options.screenshot_failures:
          self._TakeScreenshot(test)
        log = raw_result.GetFailureReason()
        if not log:
          log = 'No information.'
        result_type = base_test_result.ResultType.FAIL
        package = self.device.old_interface.DismissCrashDialogIfNeeded()
        # By convention, the test package name contains the package under
        # test (e.g. with a '.test' suffix), so a crash dialog from that
        # package means the test itself crashed.
        if package and package in self.test_pkg.GetPackageName():
          result_type = base_test_result.ResultType.CRASH
        result = test_result.InstrumentationTestResult(
            test, result_type, start_date_ms, duration_ms, log=log)
      else:
        result = test_result.InstrumentationTestResult(
            test, base_test_result.ResultType.PASS, start_date_ms, duration_ms)
      results.AddResult(result)
    # Catch exceptions thrown by StartInstrumentation().
    # See ../../third_party/android/testrunner/adb_interface.py
    except (device_errors.CommandTimeoutError,
            device_errors.DeviceUnreachableError,
            # TODO(jbudorick) Remove these once the underlying implementations
            #                 for the above are switched or wrapped.
            android_commands.errors.WaitForResponseTimedOutError,
            android_commands.errors.DeviceUnresponsiveError,
            android_commands.errors.InstrumentationError), e:
      if start_date_ms:
        duration_ms = int(time.time()) * 1000 - start_date_ms
      else:
        start_date_ms = int(time.time()) * 1000
        duration_ms = 0
      message = str(e)
      if not message:
        message = 'No information.'
      results.AddResult(test_result.InstrumentationTestResult(
          test, base_test_result.ResultType.CRASH, start_date_ms, duration_ms,
          log=message))
      raw_result = None
    self.TestTeardown(test, raw_result)
    return (results, None if results.DidRunPass() else test)
    396