# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import os

from autotest_lib.client.bin import test, utils
from autotest_lib.client.common_lib import error
from autotest_lib.client.cros import perf
from autotest_lib.client.cros import service_stopper
from autotest_lib.client.cros.graphics import graphics_utils


class graphics_GLBench(graphics_utils.GraphicsTest):
    """Run glbench, a benchmark that times graphics intensive activities."""
    version = 1
    preserve_srcdir = True
    # ServiceStopper used to stop/restore the UI; stays None until
    # initialize() runs so cleanup() is safe even after an early failure.
    _services = None

    # Good images.
    reference_images_file = 'deps/glbench/glbench_reference_images.txt'
    # Images that are bad but for which the bug has not been fixed yet.
    knownbad_images_file = 'deps/glbench/glbench_knownbad_images.txt'
    # Images that are bad and for which a fix has been submitted.
    fixedbad_images_file = 'deps/glbench/glbench_fixedbad_images.txt'

    # These tests do not draw anything, they can only be used to check
    # performance.
    no_checksum_tests = set([
        'compositing_no_fill',
        'pixel_read',
        'texture_reuse_luminance_teximage2d',
        'texture_reuse_luminance_texsubimage2d',
        'texture_reuse_rgba_teximage2d',
        'texture_reuse_rgba_texsubimage2d',
        'context_glsimple',
        'swap_glsimple',
    ])

    # Extra options appended to every glbench invocation (empty by default).
    blacklist = ''

    # Maps a glbench result unit to whether a larger value means better
    # performance; consulted when reporting perf values in run_once().
    unit_higher_is_better = {
        'mbytes_sec': True,
        'mpixels_sec': True,
        'mtexel_sec': True,
        'mtri_sec': True,
        'mvtx_sec': True,
        'us': False,
        '1280x768_fps': True
    }

    def setup(self):
        """Install the glbench dependency."""
        self.job.setup_dep(['glbench'])

    def initialize(self):
        super(graphics_GLBench, self).initialize()
        # If UI is running, we must stop it and restore later.
        self._services = service_stopper.ServiceStopper(['ui'])
        self._services.stop_services()

    def cleanup(self):
        if self._services:
            self._services.restore_services()
        super(graphics_GLBench, self).cleanup()

    def _report_temperature_value(self, keyname, temperature):
        """Log and report a temperature reading as a perf value.

        @param keyname: key to be used when reporting perf value.
        @param temperature: temperature in degrees Celsius.
        """
        logging.info('%s = %f degree Celsius', keyname, temperature)
        self.output_perf_value(
            description=keyname,
            value=temperature,
            units='Celsius',
            higher_is_better=False)

    def report_temperature(self, keyname):
        """Report current max observed temperature with given keyname.

        @param keyname: key to be used when reporting perf value.
        """
        self._report_temperature_value(keyname,
                                       utils.get_temperature_input_max())

    def report_temperature_critical(self, keyname):
        """Report temperature at which we will see throttling with given keyname.

        @param keyname: key to be used when reporting perf value.
        """
        self._report_temperature_value(keyname,
                                       utils.get_temperature_critical())

    def is_no_checksum_test(self, testname):
        """Check if given test requires no screenshot checksum.

        @param testname: name of test to check.
        @return True if testname starts with any known no-checksum prefix.
        """
        return any(
            testname.startswith(prefix) for prefix in self.no_checksum_tests)

    def load_imagenames(self, filename):
        """Loads text file with MD5 file names.

        @param filename: name of file to load, relative to self.autodir.
        @return the file's entire contents as one string.
        """
        path = os.path.join(self.autodir, filename)
        with open(path, 'r') as f:
            return f.read()

    @graphics_utils.GraphicsTest.failure_report_decorator('graphics_GLBench')
    def run_once(self, options='', hasty=False):
        """Run glbench, report perf values and verify output images.

        @param options: extra command line options passed on to glbench.
        @param hasty: if True run a quick subset of tests without thermal
                      monitoring (used on BVT).
        @raises error.TestFail: on command failure, unknown units/images,
                missing end marker or PerfControl problems.
        """
        dep = 'glbench'
        dep_dir = os.path.join(self.autodir, 'deps', dep)
        self.job.install_pkg(dep, 'dep', dep_dir)

        options += self.blacklist

        # Run the test, saving is optional and helps with debugging
        # and reference image management. If unknown images are
        # encountered one can take them from the outdir and copy
        # them (after verification) into the reference image dir.
        exefile = os.path.join(self.autodir, 'deps/glbench/glbench')
        outdir = self.outputdir
        options += ' -save -outdir=' + outdir
        # Using the -hasty option we run only a subset of tests without waiting
        # for thermals to normalize. Test should complete in 15-20 seconds.
        if hasty:
            options += ' -hasty'

        cmd = '%s %s' % (exefile, options)
        summary = None
        pc_error_reason = None
        try:
            if hasty:
                # On BVT the test will not monitor thermals so we will not
                # verify its correct status using PerfControl.
                summary = utils.run(cmd,
                                    stderr_is_expected=False,
                                    stdout_tee=utils.TEE_TO_LOGS,
                                    stderr_tee=utils.TEE_TO_LOGS).stdout
            else:
                self.report_temperature_critical('temperature_critical')
                self.report_temperature('temperature_1_start')
                # Wrap the test run inside of a PerfControl instance to make
                # machine behavior more consistent.
                with perf.PerfControl() as pc:
                    if not pc.verify_is_valid():
                        raise error.TestFail(
                            'Failed: %s' % pc.get_error_reason())
                    self.report_temperature('temperature_2_before_test')

                    # Run the test. If it gets the CPU too hot pc should
                    # notice.
                    summary = utils.run(cmd,
                                        stderr_is_expected=False,
                                        stdout_tee=utils.TEE_TO_LOGS,
                                        stderr_tee=utils.TEE_TO_LOGS).stdout
                    if not pc.verify_is_valid():
                        # Defer error handling until after perf report.
                        pc_error_reason = pc.get_error_reason()
        except error.CmdError:
            raise error.TestFail('Failed: CmdError running %s' % cmd)
        except error.CmdTimeoutError:
            raise error.TestFail('Failed: CmdTimeout running %s' % cmd)

        # Write a copy of stdout to help debug failures. Use a context
        # manager so the handle is closed even on the TestFail paths below
        # (the previous code leaked it when raising mid-postprocessing).
        results_path = os.path.join(self.outputdir, 'summary.txt')
        with open(results_path, 'w+') as f:
            f.write('# ---------------------------------------------------\n')
            f.write('# [' + cmd + ']\n')
            f.write(summary)
            f.write('\n# -------------------------------------------------\n')
            f.write('# [graphics_GLBench.py postprocessing]\n')

            # Analyze the output. Sample:
            ## board_id: NVIDIA Corporation - Quadro FX 380/PCI/SSE2
            ## Running: ../glbench -save -outdir=img
            #swap_swap = 221.36 us [swap_swap.pixmd5-20dbc...f9c700d2f.png]
            results = summary.splitlines()
            if not results:
                raise error.TestFail(
                    'Failed: No output from test. Check /tmp/' +
                    'test_that_latest/graphics_GLBench/summary.txt' +
                    ' for details.')

            # The good images, the silenced and the zombie/recurring failures.
            reference_imagenames = self.load_imagenames(
                self.reference_images_file)
            knownbad_imagenames = self.load_imagenames(
                self.knownbad_images_file)
            fixedbad_imagenames = self.load_imagenames(
                self.fixedbad_images_file)

            # Check if we saw GLBench end as expected (without crashing).
            test_ended_normal = False
            for line in results:
                if line.strip().startswith('@TEST_END'):
                    test_ended_normal = True

            # Analyze individual test results in summary.
            # TODO(pwang): Raise TestFail if an error is detected during
            # glbench.
            keyvals = {}
            failed_tests = {}
            for line in results:
                if not line.strip().startswith('@RESULT: '):
                    continue
                # Line shape: '@RESULT: name = score unit [imagefile]';
                # slice off the 9-char '@RESULT: ' prefix before splitting.
                keyval, remainder = line[9:].split('[')
                key, val = keyval.split('=')
                testname = key.strip()
                score, unit = val.split()
                testrating = float(score)
                imagefile = remainder.split(']')[0]

                if not hasty:
                    higher = self.unit_higher_is_better.get(unit)
                    if higher is None:
                        raise error.TestFail(
                            'Failed: Unknown test unit "%s" for %s' %
                            (unit, testname))
                    # Prepend unit to test name to maintain backwards
                    # compatibility with existing per data.
                    perf_value_name = '%s_%s' % (unit, testname)
                    self.output_perf_value(
                        description=perf_value_name,
                        value=testrating,
                        units=unit,
                        higher_is_better=higher,
                        graph=perf_value_name)
                    # Add extra value to the graph distinguishing different
                    # boards.
                    variant = utils.get_board_with_frequency_and_memory()
                    desc = '%s-%s' % (perf_value_name, variant)
                    self.output_perf_value(
                        description=desc,
                        value=testrating,
                        units=unit,
                        higher_is_better=higher,
                        graph=perf_value_name)

                # Classify result image.
                if testrating == -1.0:
                    # Tests that generate GL Errors.
                    glerror = imagefile.split('=')[1]
                    f.write('# GLError ' + glerror +
                            ' during test (perf set to -3.0)\n')
                    keyvals[testname] = -3.0
                    failed_tests[testname] = 'GLError'
                elif testrating == 0.0:
                    # Tests for which glbench does not generate a meaningful
                    # perf score.
                    f.write('# No score for test\n')
                    keyvals[testname] = 0.0
                elif imagefile in fixedbad_imagenames:
                    # We know the image looked bad at some point in time but
                    # we thought it was fixed. Throw an exception as a
                    # reminder.
                    keyvals[testname] = -2.0
                    f.write('# fixedbad [' + imagefile +
                            '] (setting perf as -2.0)\n')
                    failed_tests[testname] = imagefile
                elif imagefile in knownbad_imagenames:
                    # We have triaged the failure and have filed a tracking
                    # bug. Don't throw an exception and remind there is a
                    # problem.
                    keyvals[testname] = -1.0
                    f.write('# knownbad [' + imagefile +
                            '] (setting perf as -1.0)\n')
                    # This failure is whitelisted so don't add to
                    # failed_tests.
                elif imagefile in reference_imagenames:
                    # Known good reference images (default).
                    keyvals[testname] = testrating
                elif imagefile == 'none':
                    # Tests that do not write images can't fail because of
                    # them.
                    keyvals[testname] = testrating
                elif self.is_no_checksum_test(testname):
                    # TODO(ihf): these really should not write any images
                    keyvals[testname] = testrating
                else:
                    # Completely unknown images. Raise a failure.
                    keyvals[testname] = -2.0
                    failed_tests[testname] = imagefile
                    f.write('# unknown [' + imagefile +
                            '] (setting perf as -2.0)\n')

        if not hasty:
            self.report_temperature('temperature_3_after_test')
            self.write_perf_keyval(keyvals)

        # Raise exception if images don't match.
        if failed_tests:
            logging.info('Some images are not matching their reference in %s.',
                         self.reference_images_file)
            logging.info('Please verify that the output images are correct '
                         'and if so copy them to the reference directory.')
            raise error.TestFail('Failed: Some images are not matching their '
                                 'references. Check /tmp/'
                                 'test_that_latest/graphics_GLBench/summary.txt'
                                 ' for details.')

        if not test_ended_normal:
            raise error.TestFail(
                'Failed: No end marker. Presumed crash/missing images.')
        if pc_error_reason:
            raise error.TestFail('Failed: %s' % pc_error_reason)