# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs perf tests.

Our buildbot infrastructure requires each slave to run steps serially.
This is sub-optimal for Android, where these steps can run independently on
multiple connected devices.

The buildbots will run this script multiple times per cycle:
- First: all steps listed in --steps will be executed in parallel using all
connected devices. Step results will be pickled to disk. Each step has a
unique name. The result code will be ignored if the step name is listed in
--flaky-steps.
The buildbot will treat this step as a regular step, and will not process any
graph data.

- Then, with --print-step STEP_NAME: at this stage, we simply print the
previously saved results for that step. The buildbot will then process the
graph data accordingly.
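
For example, a cycle consists of two kinds of invocations (file and step
names below are illustrative, and the exact harness entry point may vary):

  <perf test harness> --steps perf_steps.json --flaky-steps flaky_steps.json
  <perf test harness> --print-step step_name_foo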

The JSON steps file contains a list of [step_name, command] pairs in the
format:
[
  ["step_name_foo", "script_to_execute foo"],
  ["step_name_bar", "script_to_execute bar"]
]

This preserves the order in which the steps are executed.

The JSON flaky steps file contains a list of step names whose results should
be ignored:
[
  "step_name_foo",
  "step_name_bar"
]

Note that script_to_execute must accept at least the following option:
  --device: the serial number to be passed to all adb commands.
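
For illustration, a minimal step script could handle that option as follows
(a sketch only; real steps typically parse more options):

  import optparse
  parser = optparse.OptionParser()
  parser.add_option('--device', help='Serial number passed to adb commands.')
  options, _ = parser.parse_args()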
"""

import datetime
import logging
import os
import pickle
import sys
import threading
import time

from pylib import constants
from pylib import forwarder
from pylib import pexpect
from pylib.base import base_test_result
from pylib.base import base_test_runner


def PrintTestOutput(test_name):
  """Helper method to print the output of the previously executed test_name.

  Args:
    test_name: name of the test that has been previously executed.

  Returns:
    exit code generated by the test step.
  """
  file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name)
  if not os.path.exists(file_name):
    logging.error('File not found %s', file_name)
    return 1

  with open(file_name, 'rb') as f:
    persisted_result = pickle.loads(f.read())
  logging.info('*' * 80)
  logging.info('Output from:')
  logging.info(persisted_result['cmd'])
  logging.info('*' * 80)
  print persisted_result['output']

  return persisted_result['exit_code']
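
# A caller would typically propagate the persisted exit code back to the
# harness, e.g. (a sketch; the step name is illustrative):
#   sys.exit(PrintTestOutput('step_name_foo'))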


class _HeartBeatLogger(object):
  # How often to print the heartbeat on flush().
  _PRINT_INTERVAL = 30.0

  def __init__(self):
    """A file-like class for keeping the buildbot alive."""
    self._len = 0
    self._tick = time.time()
    self._stopped = threading.Event()
    self._timer = threading.Thread(target=self._runner)
    self._timer.start()

  def _runner(self):
    while not self._stopped.is_set():
      self.flush()
      self._stopped.wait(_HeartBeatLogger._PRINT_INTERVAL)

  def write(self, data):
    self._len += len(data)

  def flush(self):
    now = time.time()
    if now - self._tick >= _HeartBeatLogger._PRINT_INTERVAL:
      self._tick = now
      print '--single-step output length %d' % self._len
      sys.stdout.flush()

  def stop(self):
    self._stopped.set()

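# A _HeartBeatLogger can stand in wherever a writable, flushable file-like
# object is expected, e.g. (a sketch; long_running_call is hypothetical):
#   logger = _HeartBeatLogger()
#   try:
#     long_running_call(logfile=logger)
#   finally:
#     logger.stop()
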
class TestRunner(base_test_runner.BaseTestRunner):
  def __init__(self, test_options, device, tests, flaky_tests):
    """A TestRunner instance runs a perf test on a single device.

    Args:
      test_options: A PerfOptions object.
      device: Device on which to run the tests.
      tests: a dict mapping test_name to command.
      flaky_tests: a list of flaky test names.
    """
    super(TestRunner, self).__init__(device, None, 'Release')
    self._options = test_options
    self._tests = tests
    self._flaky_tests = flaky_tests

  @staticmethod
  def _IsBetter(result):
    """Returns True if result should replace any previously saved result."""
    if result['actual_exit_code'] == 0:
      return True
    pickled = os.path.join(constants.PERF_OUTPUT_DIR,
                           result['name'])
    if not os.path.exists(pickled):
      return True
    with open(pickled, 'rb') as f:
      previous = pickle.loads(f.read())
    return result['actual_exit_code'] < previous['actual_exit_code']

  @staticmethod
  def _SaveResult(result):
    """Pickles result to disk if it is better than the saved one (if any)."""
    if TestRunner._IsBetter(result):
      with open(os.path.join(constants.PERF_OUTPUT_DIR,
                             result['name']), 'wb') as f:
        f.write(pickle.dumps(result))

  def _LaunchPerfTest(self, test_name):
    """Runs a perf test.

    Args:
      test_name: the name of the test to be executed.

    Returns:
      A tuple of (output, base_test_result.ResultType).
    """
    try:
      logging.warning('Unmapping device ports')
      forwarder.Forwarder.UnmapAllDevicePorts(self.adb)
      self.adb.RestartAdbdOnDevice()
    except Exception as e:
      logging.error('Exception when tearing down device: %s', e)

    cmd = ('%s --device %s' %
           (self._tests[test_name], self.device))
    logging.info('%s : %s', test_name, cmd)
    start_time = datetime.datetime.now()

    timeout = 5400  # 90 minutes.
    if self._options.no_timeout:
      timeout = None
    full_cmd = cmd
    if self._options.dry_run:
      full_cmd = 'echo %s' % cmd

    logfile = sys.stdout
    if self._options.single_step:
      # Just print a heart-beat so that the outer buildbot scripts won't time
      # out while waiting for output.
      logfile = _HeartBeatLogger()
    cwd = os.path.abspath(constants.DIR_SOURCE_ROOT)
    if full_cmd.startswith('src/'):
      cwd = os.path.abspath(os.path.join(constants.DIR_SOURCE_ROOT, os.pardir))
    output, exit_code = pexpect.run(
        full_cmd, cwd=cwd,
        withexitstatus=True, logfile=logfile, timeout=timeout,
        env=os.environ)
    if self._options.single_step:
      # Stop the logger.
      logfile.stop()
    end_time = datetime.datetime.now()
    if exit_code is None:
      exit_code = -1
    logging.info('%s : exit_code=%d in %d secs at %s',
                 test_name, exit_code, (end_time - start_time).seconds,
                 self.device)
    result_type = base_test_result.ResultType.FAIL
    if exit_code == 0:
      result_type = base_test_result.ResultType.PASS
    actual_exit_code = exit_code
    if test_name in self._flaky_tests:
      # The exit_code is used at the second stage when printing the test
      # output. If the test is flaky, force it to 0 to keep that step green
      # while still gathering data for the perf dashboards.
      # The result_type is used by the test_dispatcher to retry the test.
      exit_code = 0

    persisted_result = {
        'name': test_name,
        'output': output,
        'exit_code': exit_code,
        'actual_exit_code': actual_exit_code,
        'result_type': result_type,
        'total_time': (end_time - start_time).seconds,
        'device': self.device,
        'cmd': cmd,
    }
    self._SaveResult(persisted_result)

    return (output, result_type)

  def RunTest(self, test_name):
    """Runs a perf test on the device.

    Args:
      test_name: the name of the test to run; also used when logging and
        persisting the result.

    Returns:
      A tuple of (TestRunResults, retry).
    """
    output, result_type = self._LaunchPerfTest(test_name)
    results = base_test_result.TestRunResults()
    results.AddResult(base_test_result.BaseTestResult(test_name, result_type))
    retry = None
    if not results.DidRunPass():
      retry = test_name
    return results, retry