# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Class for running instrumentation tests on a single device."""

import logging
import os
import re
import time

from pylib import android_commands
from pylib import constants
from pylib import json_perf_parser
from pylib import perf_tests_helper
from pylib import valgrind_tools
from pylib.base import base_test_result
from pylib.base import base_test_runner

import test_result


_PERF_TEST_ANNOTATION = 'PerfTest'


def _GetDataFilesForTestSuite(suite_basename):
  """Returns a list of data files/dirs needed by the test suite.

  Args:
    suite_basename: The test suite basename for which to return file paths.

  Returns:
    A list of test file and directory paths.
  """
  test_files = []
  if suite_basename in ['ChromeTest', 'ContentShellTest']:
    test_files += [
        'net/data/ssl/certificates/',
    ]
  return test_files

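# A quick illustration of the helper above (example call, not part of the
# original file): _GetDataFilesForTestSuite('ContentShellTest') returns
# ['net/data/ssl/certificates/'], while any other suite basename returns [].
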

class TestRunner(base_test_runner.BaseTestRunner):
  """Responsible for running a series of tests connected to a single device."""

  _DEVICE_DATA_DIR = 'chrome/test/data'
  _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile'
  _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR +
                                       '/chrome-profile*')
  _DEVICE_HAS_TEST_FILES = {}

  def __init__(self, test_options, device, shard_index, test_pkg,
               ports_to_forward):
    """Creates a new TestRunner.

    Args:
      test_options: An InstrumentationOptions object.
      device: Attached android device.
      shard_index: Shard index.
      test_pkg: A TestPackage object.
      ports_to_forward: A list of port numbers for which to set up forwarders.
          Can be optionally requested by a test case.
    """
    super(TestRunner, self).__init__(device, test_options.tool,
                                     test_options.build_type,
                                     test_options.push_deps,
                                     test_options.cleanup_test_files)
    self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index

    self.options = test_options
    self.test_pkg = test_pkg
    self.ports_to_forward = ports_to_forward

  #override
  def InstallTestPackage(self):
    self.test_pkg.Install(self.adb)

  #override
  def PushDataDeps(self):
    # TODO(frankf): Implement a general approach for copying/installing
    # once across test runners.
    if TestRunner._DEVICE_HAS_TEST_FILES.get(self.device, False):
      logging.warning('Already copied test files to device %s, skipping.',
                      self.device)
      return

    test_data = _GetDataFilesForTestSuite(self.test_pkg.GetApkName())
    if test_data:
      # Make sure the SD card is ready.
      self.adb.WaitForSdCardReady(20)
      for p in test_data:
        self.adb.PushIfNeeded(
            os.path.join(constants.DIR_SOURCE_ROOT, p),
            os.path.join(self.adb.GetExternalStorage(), p))

    # TODO(frankf): Specify test data in this file rather than passing it
    # on the command line.
    for dest_host_pair in self.options.test_data:
      dst_src = dest_host_pair.split(':', 1)
      dst_layer = dst_src[0]
      host_src = dst_src[1]
      host_test_files_path = constants.DIR_SOURCE_ROOT + '/' + host_src
      if os.path.exists(host_test_files_path):
        self.adb.PushIfNeeded(host_test_files_path,
                              self.adb.GetExternalStorage() + '/' +
                              TestRunner._DEVICE_DATA_DIR + '/' + dst_layer)
    self.tool.CopyFiles()
    TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True

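  # Illustrative shape of the 'dst:src' pairs consumed by PushDataDeps()
  # above (the pair below is hypothetical): given
  # test_data = ['base:base/test/data'], the host directory
  # <DIR_SOURCE_ROOT>/base/test/data is pushed to
  # <external storage>/chrome/test/data/base on the device.
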
  def _GetInstrumentationArgs(self):
    ret = {}
    if self.options.wait_for_debugger:
      ret['debug'] = 'true'
    return ret

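  # Note: the dict returned above is handed to RunInstrumentationTest() and,
  # assuming the usual 'am instrument' plumbing, ends up as '-e debug true',
  # which makes the instrumentation block until a Java debugger attaches.
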
  def _TakeScreenshot(self, test):
    """Takes a screenshot from the device."""
    screenshot_name = os.path.join(constants.SCREENSHOTS_DIR, test + '.png')
    logging.info('Taking screenshot named %s', screenshot_name)
    self.adb.TakeScreenshot(screenshot_name)

  def SetUp(self):
    """Sets up the test harness and device before all tests are run."""
    super(TestRunner, self).SetUp()
    if not self.adb.IsRootEnabled():
      logging.warning('Unable to enable java asserts for %s (non-rooted '
                      'device)', self.device)
    else:
      if self.adb.SetJavaAssertsEnabled(True):
        self.adb.Reboot(full_reboot=False)

    # Use a different default port for each shard: multiple processes
    # launching lighttpd on the same port at the same time is a race.
    http_server_ports = self.LaunchTestHttpServer(
        os.path.join(constants.DIR_SOURCE_ROOT), self._lighttp_port)
    if self.ports_to_forward:
      self._ForwardPorts([(port, port) for port in self.ports_to_forward])
    self.flags.AddFlags(['--enable-test-intents'])

  def TearDown(self):
    """Cleans up the test harness and saves outstanding data from the run."""
    if self.ports_to_forward:
      self._UnmapPorts([(port, port) for port in self.ports_to_forward])
    super(TestRunner, self).TearDown()

  def TestSetup(self, test):
    """Sets up the test harness for running a particular test.

    Args:
      test: The name of the test that will be run.
    """
    self.SetupPerfMonitoringIfNeeded(test)
    self._SetupIndividualTestTimeoutScale(test)
    self.tool.SetupEnvironment()

    # Make sure the forwarder is still running.
    self._RestartHttpServerForwarderIfNecessary()

  def _IsPerfTest(self, test):
    """Determines whether a test is a performance test.

    Args:
      test: The name of the test to be checked.

    Returns:
      Whether the test is annotated as a performance test.
    """
    return _PERF_TEST_ANNOTATION in self.test_pkg.GetTestAnnotations(test)

  def SetupPerfMonitoringIfNeeded(self, test):
    """Sets up performance monitoring if the specified test requires it.

    Args:
      test: The name of the test to be run.
    """
    if not self._IsPerfTest(test):
      return
    self.adb.Adb().SendCommand('shell rm ' +
                               TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX)
    self.adb.StartMonitoringLogcat()

  def TestTeardown(self, test, raw_result):
    """Cleans up the test harness after running a particular test.

    Depending on the options of this TestRunner this might handle performance
    tracking.  This method will only be called if the test passed.

    Args:
      test: The name of the test that was just run.
      raw_result: The raw result for this test.
    """

    self.tool.CleanUpEnvironment()

    # The logic below relies on the test passing.
    if not raw_result or raw_result.GetStatusCode():
      return

    self.TearDownPerfMonitoring(test)

  def TearDownPerfMonitoring(self, test):
    """Cleans up performance monitoring if the specified test required it.

    Args:
      test: The name of the test that was just run.
    Raises:
      Exception: if there's anything wrong with the perf data.
    """
    if not self._IsPerfTest(test):
      return
    raw_test_name = test.split('#')[1]

    # Wait for and grab the annotation data so we can figure out which
    # traces to parse.
    match = self.adb.WaitForLogMatch(
        re.compile(r'\*\*PERFANNOTATION\(' + raw_test_name + r'\):(.*)'),
        None)

    # If the test is restricted to a specific device type (e.g. tablet-only
    # or phone-only) and it is being run on the wrong device, the test just
    # quits without doing anything.  The java test harness will still print
    # the appropriate annotation for us, but will add --NORUN-- so we know
    # to ignore the results.
    # The --NORUN-- tag is managed by MainActivityTestBase.java.
    if match.group(1) != '--NORUN--':

      # Obtain the relevant perf data.  The data is dumped to a
      # JSON formatted file.
      json_string = self.adb.GetProtectedFileContents(
          '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt')

      if json_string:
        json_string = '\n'.join(json_string)
      else:
        raise Exception('Perf file does not exist or is empty')

      if self.options.save_perf_json:
        json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
        with open(json_local_file, 'w') as f:
          f.write(json_string)
        logging.info('Saving Perf UI JSON from test %s to %s',
                     test, json_local_file)

      raw_perf_data = match.group(1).split(';')

      for raw_perf_set in raw_perf_data:
        if raw_perf_set:
          perf_set = raw_perf_set.split(',')
          if len(perf_set) != 3:
            raise Exception('Unexpected number of tokens in perf annotation '
                            'string: ' + raw_perf_set)

          # Process the performance data.
          result = json_perf_parser.GetAverageRunInfoFromJSONString(json_string,
                                                                    perf_set[0])
          perf_tests_helper.PrintPerfResult(perf_set[1], perf_set[2],
                                            [result['average']],
                                            result['units'])

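  # Sketch of the logcat annotation parsed above (field values are
  # illustrative): '**PERFANNOTATION(testFoo):Load,page_load_time,cold'
  # i.e. semicolon-separated triples of
  # '<json perf key>,<measurement>,<trace>', where the key selects the
  # series in PerfTestData.txt and the other two label the printed result.
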
  def _SetupIndividualTestTimeoutScale(self, test):
    timeout_scale = self._GetIndividualTestTimeoutScale(test)
    valgrind_tools.SetChromeTimeoutScale(self.adb, timeout_scale)

  def _GetIndividualTestTimeoutScale(self, test):
    """Returns the timeout scale for the given |test|."""
    annotations = self.test_pkg.GetTestAnnotations(test)
    timeout_scale = 1
    if 'TimeoutScale' in annotations:
      for annotation in annotations:
        scale_match = re.match('TimeoutScale:([0-9]+)', annotation)
        if scale_match:
          timeout_scale = int(scale_match.group(1))
    if self.options.wait_for_debugger:
      timeout_scale *= 100
    return timeout_scale

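  # Example: a test annotated with 'TimeoutScale:3' yields a scale of 3,
  # and with --wait-for-debugger the returned value becomes 3 * 100 = 300.
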
  def _GetIndividualTestTimeoutSecs(self, test):
    """Returns the timeout in seconds for the given |test|."""
    annotations = self.test_pkg.GetTestAnnotations(test)
    if 'Manual' in annotations:
      return 600 * 60
    if 'External' in annotations:
      return 10 * 60
    if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
      return 5 * 60
    if 'MediumTest' in annotations:
      return 3 * 60
    return 1 * 60

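  # The effective timeout passed to _RunTest() by RunTest() below is the
  # product of the base seconds and both scales, e.g. a 'MediumTest' with
  # 'TimeoutScale:2' and a tool scale of 1 (illustrative values) gets
  # 180 * 2 * 1 = 360 seconds.
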
  def _RunTest(self, test, timeout):
    try:
      return self.adb.RunInstrumentationTest(
          test, self.test_pkg.GetPackageName(),
          self._GetInstrumentationArgs(), timeout)
    except android_commands.errors.WaitForResponseTimedOutError:
      logging.info('Ran the test with a timeout of %ds.', timeout)
      raise

  #override
  def RunTest(self, test):
    raw_result = None
    start_date_ms = None
    results = base_test_result.TestRunResults()
    timeout = (self._GetIndividualTestTimeoutSecs(test) *
               self._GetIndividualTestTimeoutScale(test) *
               self.tool.GetTimeoutScale())
    try:
      self.TestSetup(test)
      start_date_ms = int(time.time() * 1000)
      raw_result = self._RunTest(test, timeout)
      duration_ms = int(time.time() * 1000) - start_date_ms
      status_code = raw_result.GetStatusCode()
      if status_code:
        log = raw_result.GetFailureReason()
        if not log:
          log = 'No information.'
        if (self.options.screenshot_failures or
            'INJECT_EVENTS perm' in log):
          self._TakeScreenshot(test)
        result = test_result.InstrumentationTestResult(
            test, base_test_result.ResultType.FAIL, start_date_ms, duration_ms,
            log=log)
      else:
        result = test_result.InstrumentationTestResult(
            test, base_test_result.ResultType.PASS, start_date_ms, duration_ms)
      results.AddResult(result)
    # Catch exceptions thrown by StartInstrumentation().
    # See ../../third_party/android/testrunner/adb_interface.py
    except (android_commands.errors.WaitForResponseTimedOutError,
            android_commands.errors.DeviceUnresponsiveError,
            android_commands.errors.InstrumentationError) as e:
      if start_date_ms:
        duration_ms = int(time.time() * 1000) - start_date_ms
      else:
        start_date_ms = int(time.time() * 1000)
        duration_ms = 0
      message = str(e)
      if not message:
        message = 'No information.'
      results.AddResult(test_result.InstrumentationTestResult(
          test, base_test_result.ResultType.CRASH, start_date_ms, duration_ms,
          log=message))
      raw_result = None
    self.TestTeardown(test, raw_result)
    return (results, None if results.DidRunPass() else test)
    346