# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import os
import re

from pylib import constants
from pylib import pexpect
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.device import device_errors
from pylib.perf import perf_control


def _TestSuiteRequiresMockTestServer(suite_name):
  """Returns True if the test suite requires mock test server."""
  tests_require_net_test_server = ['unit_tests', 'net_unittests',
                                   'content_unittests',
                                   'content_browsertests']
  return suite_name in tests_require_net_test_server


def _TestSuiteRequiresHighPerfMode(suite_name):
  """Returns True if the test suite requires high performance mode."""
  return 'perftests' in suite_name


class TestRunner(base_test_runner.BaseTestRunner):
  def __init__(self, test_options, device, test_package):
    """Single test suite attached to a single device.

    Args:
      test_options: A GTestOptions object.
      device: Device to run the tests on.
      test_package: An instance of the TestPackage class.
    """

    super(TestRunner, self).__init__(device, test_options.tool,
                                     test_options.push_deps,
                                     test_options.cleanup_test_files)

    self.test_package = test_package
    self.test_package.tool = self.tool
    self._test_arguments = test_options.test_arguments

    timeout = test_options.timeout
    if timeout == 0:
      timeout = 60
    # On a VM (e.g. Chromium buildbots), this timeout is far too small.
    if os.environ.get('BUILDBOT_SLAVENAME'):
      timeout = timeout * 2

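    # The active tool (e.g. Valgrind) may scale the timeout further; the
    # default tool's scale factor is 1.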
    self._timeout = timeout * self.tool.GetTimeoutScale()
    if _TestSuiteRequiresHighPerfMode(self.test_package.suite_name):
      self._perf_controller = perf_control.PerfControl(self.device)

  #override
  def InstallTestPackage(self):
    self.test_package.Install(self.device)

  #override
  def PushDataDeps(self):
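    # Make sure the device has finished booting before pushing any files.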
    self.device.WaitUntilFullyBooted(timeout=20)
    self.tool.CopyFiles()
    if os.path.exists(constants.ISOLATE_DEPS_DIR):
      # TODO(frankf): linux_dumper_unittest_helper needs to be in the same dir
      # as breakpad_unittests exe. Find a better way to do this.
      if self.test_package.suite_name == 'breakpad_unittests':
        device_dir = constants.TEST_EXECUTABLE_DIR
      else:
        device_dir = self.device.GetExternalStoragePath()
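      # Push each isolate dependency separately; PushChangedFiles skips files
      # that are already up to date on the device.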
      for p in os.listdir(constants.ISOLATE_DEPS_DIR):
        self.device.PushChangedFiles(
            os.path.join(constants.ISOLATE_DEPS_DIR, p),
            os.path.join(device_dir, p))

  def _ParseTestOutput(self, p):
    """Process the test output.

    Args:
      p: An instance of the pexpect spawn class.

    Returns:
      A TestRunResults object.
    """
    results = base_test_result.TestRunResults()

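    # Typical gtest output matched by the regexes below (test names here are
    # illustrative; the timing suffix on OK lines is ignored):
    #   [ RUN      ] FooTest.Bar
    #   [       OK ] FooTest.Bar (12 ms)
    #   [  FAILED  ] FooTest.Bar (12 ms)
    #   [  PASSED  ] 10 tests.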
    # Test case statuses.
    re_run = re.compile(r'\[ RUN      \] ?(.*)\r\n')
    re_fail = re.compile(r'\[  FAILED  \] ?(.*)\r\n')
    re_ok = re.compile(r'\[       OK \] ?(.*?) .*\r\n')

    # Test run statuses.
    re_passed = re.compile(r'\[  PASSED  \] ?(.*)\r\n')
    re_runner_fail = re.compile(r'\[ RUNNER_FAILED \] ?(.*)\r\n')
    # Signal handlers are installed before starting tests
    # to output the CRASHED marker when a crash happens.
    re_crash = re.compile(r'\[ CRASHED      \](.*)\r\n')

    log = ''
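    # Drive pexpect as a simple state machine: wait for either the start of a
    # test case or an end-of-run marker, then wait for that test's terminal
    # status (OK, FAILED, or CRASHED).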
    try:
      while True:
        full_test_name = None
        found = p.expect([re_run, re_passed, re_runner_fail],
                         timeout=self._timeout)
        if found == 1:  # re_passed
          break
        elif found == 2:  # re_runner_fail
          break
        else:  # re_run
          full_test_name = p.match.group(1).replace('\r', '')
          found = p.expect([re_ok, re_fail, re_crash], timeout=self._timeout)
          log = p.before.replace('\r', '')
          if found == 0:  # re_ok
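            # Only record a pass when the OK line names the test that was
            # started; otherwise the line is ignored.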
            if full_test_name == p.match.group(1).replace('\r', ''):
              results.AddResult(base_test_result.BaseTestResult(
                  full_test_name, base_test_result.ResultType.PASS,
                  log=log))
          elif found == 2:  # re_crash
            results.AddResult(base_test_result.BaseTestResult(
                full_test_name, base_test_result.ResultType.CRASH,
                log=log))
            break
          else:  # re_fail
            results.AddResult(base_test_result.BaseTestResult(
                full_test_name, base_test_result.ResultType.FAIL, log=log))
    except pexpect.EOF:
      logging.error('Test terminated - EOF')
      # We're here because either the device went offline, or the test harness
      # crashed without outputting the CRASHED marker (crbug.com/175538).
      if not self.device.IsOnline():
        raise device_errors.DeviceUnreachableError(
            'Device %s went offline.' % str(self.device))
      if full_test_name:
        results.AddResult(base_test_result.BaseTestResult(
            full_test_name, base_test_result.ResultType.CRASH,
            log=p.before.replace('\r', '')))
    except pexpect.TIMEOUT:
      logging.error('Test terminated after %d second timeout.',
                    self._timeout)
      if full_test_name:
        results.AddResult(base_test_result.BaseTestResult(
            full_test_name, base_test_result.ResultType.TIMEOUT,
            log=p.before.replace('\r', '')))
    finally:
      p.close()

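    # Log the native gtest exit code (when available) along with the pexpect
    # context to help diagnose runs that ended abnormally.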
    ret_code = self.test_package.GetGTestReturnCode(self.device)
    if ret_code:
      logging.critical(
          'gtest exit code: %d\npexpect.before: %s\npexpect.after: %s',
          ret_code, p.before, p.after)

    return results

  #override
  def RunTest(self, test):
    test_results = base_test_result.TestRunResults()
    if not test:
      return test_results, None

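    # |test| is a colon-separated list of gtest names (a gtest filter string),
    # e.g. 'FooTest.Bar:FooTest.Baz'.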
    try:
      self.test_package.ClearApplicationState(self.device)
      self.test_package.CreateCommandLineFileOnDevice(
          self.device, test, self._test_arguments)
      test_results = self._ParseTestOutput(
          self.test_package.SpawnTestProcess(self.device))
    finally:
      self.CleanupSpawningServerState()
    # Mark any requested tests that produced no result as UNKNOWN.
    all_tests = set(test.split(':'))
    all_tests_ran = set([t.GetName() for t in test_results.GetAll()])
    unknown_tests = all_tests - all_tests_ran
    test_results.AddResults(
        [base_test_result.BaseTestResult(t, base_test_result.ResultType.UNKNOWN)
         for t in unknown_tests])
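    # Build a retry filter from every test that did not pass so the caller can
    # rerun them.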
    retry = ':'.join([t.GetName() for t in test_results.GetNotPass()])
    return test_results, retry

  #override
  def SetUp(self):
    """Sets up the necessary test environment for the test suite."""
    super(TestRunner, self).SetUp()
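    # Suites that need the mock test server talk to it through the host-side
    # test server spawner.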
    if _TestSuiteRequiresMockTestServer(self.test_package.suite_name):
      self.LaunchChromeTestServerSpawner()
    if _TestSuiteRequiresHighPerfMode(self.test_package.suite_name):
      self._perf_controller.SetHighPerfMode()
    self.tool.SetupEnvironment()

  #override
  def TearDown(self):
    """Cleans up the test environment for the test suite."""
    if _TestSuiteRequiresHighPerfMode(self.test_package.suite_name):
      self._perf_controller.SetDefaultPerfMode()
    self.test_package.ClearApplicationState(self.device)
    self.tool.CleanUpEnvironment()
    super(TestRunner, self).TearDown()
    199