# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import bz2
import glob
import json
import logging
import os
import re
import shutil
import tempfile
import time
import xml.etree.ElementTree as et
from autotest_lib.client.bin import test, utils
from autotest_lib.client.common_lib import error
from autotest_lib.client.cros import cros_logging, service_stopper
from autotest_lib.client.cros.graphics import graphics_utils


class graphics_dEQP(test.test):
    """Run the drawElements Quality Program test suite.
    """
    version = 1
    _services = None
    _hasty = False
    _hasty_batch_size = 100  # Batch size in hasty mode.
    _shard_number = 0
    _shard_count = 1
    _board = None
    _cpu_type = None
    _gpu_type = None
    _surface = None
    _can_run_executables = []
    _filter = None
    _width = 256  # Use smallest width for which all tests run/pass.
    _height = 256  # Use smallest height for which all tests run/pass.
    _timeout = 70  # Larger than twice the dEQP watchdog timeout at 30s.
    _test_names = None
    _test_names_file = None
    _log_path = None  # Location for detailed test output logs (in /tmp/).
    _debug = False  # Analyze kernel messages.
    _log_reader = None  # Reader to analyze (kernel) messages log.
    _log_filter = re.compile('.* .* kernel:')  # Kernel messages filter.
    _env = None  # Environment for test processes.

    DEQP_BASEDIR = '/usr/local/deqp'
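    # Maps dEQP test name prefixes to their module names; used by
    # _get_executable below to locate the matching binary.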
    DEQP_MODULES = {
        'dEQP-EGL': 'egl',
        'dEQP-GLES2': 'gles2',
        'dEQP-GLES3': 'gles3',
        'dEQP-GLES31': 'gles31',
        'dEQP-VK': 'vk',
    }

    def initialize(self):
        self._board = utils.get_board()
        self._cpu_type = utils.get_cpu_soc_family()
        self._gpu_type = utils.get_gpu_family()

        # dEQP may depend on libraries that are present only on test images.
        # Those libraries are installed in /usr/local.
        self._env = os.environ.copy()
        old_ld_path = self._env.get('LD_LIBRARY_PATH', '')
        if old_ld_path:
            self._env['LD_LIBRARY_PATH'] = ('/usr/local/lib:/usr/local/lib64:'
                                            + old_ld_path)
        else:
            self._env['LD_LIBRARY_PATH'] = '/usr/local/lib:/usr/local/lib64'

        # Determine which executables can be run. Right now never egl.
        major, minor = graphics_utils.get_gles_version()
        if major is None or minor is None:
            raise error.TestFail(
                'Failed: Could not get gles version information (%s, %s).' %
                (major, minor))
        logging.info('Found gles%d.%d.', major, minor)
        if major >= 2:
            self._can_run_executables.append('gles2/deqp-gles2')
        if major >= 3:
            self._can_run_executables.append('gles3/deqp-gles3')
            if major > 3 or minor >= 1:
                self._can_run_executables.append('gles31/deqp-gles31')
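        # E.g. a GLES 3.1 capable device can run the gles2, gles3 and gles31
        # binaries, while a GLES 2.0 device can only run gles2.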

        # If libvulkan is installed, then assume the board supports vulkan.
        has_libvulkan = False
        for libdir in ('/usr/lib', '/usr/lib64', '/usr/local/lib',
                       '/usr/local/lib64'):
            if os.path.exists(os.path.join(libdir, 'libvulkan.so')):
                has_libvulkan = True
                break

        if (has_libvulkan and
                os.path.exists('/usr/local/deqp/external/vulkancts/modules'
                               '/vulkan/deqp-vk')):
            self._can_run_executables.append(
                'external/vulkancts/modules/vulkan/deqp-vk')

        self._services = service_stopper.ServiceStopper(['ui', 'powerd'])
        # Valid choices are fbo and pbuffer. The latter avoids dEQP assumptions.
        self._surface = 'pbuffer'

    def cleanup(self):
        if self._services:
            self._services.restore_services()

    def _parse_test_results(self, result_filename, test_results=None):
        """Handles result files with one or more test results.

        @param result_filename: log file to parse.
        @param test_results: optional dictionary of result counts to update.

        @return: dictionary of parsed test results.
        """
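        # A dEQP log contains records of the following form for each test
        # (sketch, not the complete format):
        #   #beginTestCaseResult <test name>
        #   <TestCaseResult ...> ... <Result StatusCode="Pass"/> ...
        #   #endTestCaseResult   (or #terminateTestCaseResult <code> on abort)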
        xml = ''
        xml_start = False
        xml_complete = False
        xml_bad = False
        result = 'ParseTestResultFail'

        if test_results is None:
            test_results = {}

        if not os.path.isfile(result_filename):
            return test_results
        # TODO(ihf): Add names of failing tests to a list in the results.
        with open(result_filename) as result_file:
            for line in result_file:
                # If the test terminates early, the XML will be incomplete
                # and should not be parsed.
                if line.startswith('#terminateTestCaseResult'):
                    result = line.strip().split()[1]
                    xml_bad = True
                # Will only see #endTestCaseResult if the test does not
                # terminate early.
                elif line.startswith('#endTestCaseResult'):
                    xml_complete = True
                # Check for a new #beginTestCaseResult before treating the
                # line as XML content, so that a begin-inside-begin is
                # detected as bad instead of swallowed as XML.
                elif line.startswith('#beginTestCaseResult'):
                    # If we see another begin before an end then something is
                    # wrong.
                    if xml_start:
                        xml_bad = True
                    else:
                        xml_start = True
                elif xml_start:
                    xml += line

                if xml_complete or xml_bad:
                    if xml_complete:
                        myparser = et.XMLParser(encoding='ISO-8859-1')
                        root = et.fromstring(xml, parser=myparser)
                        result = root.find('Result').get('StatusCode').strip()
                        xml_complete = False
                    test_results[result] = test_results.get(result, 0) + 1
                    xml_bad = False
                    xml_start = False
                    result = 'ParseTestResultFail'
                    xml = ''

        return test_results

    def _load_not_passing_cases(self, test_filter):
        """Load all test cases that are in non-'Pass' expectations."""
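        # Expectation files are named '<test_filter>.<subset>.bz2', e.g.
        # 'dEQP-GLES2.functional.Fail.bz2' (see _get_test_cases below).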
        not_passing_cases = []
        expectations_dir = os.path.join(self.bindir, 'expectations',
                                        self._gpu_type)
        subset_spec = '%s.*' % test_filter
        subset_paths = glob.glob(os.path.join(expectations_dir, subset_spec))
        for subset_file in subset_paths:
            # Filter against extra hasty failures only in hasty mode.
            if ('.Pass.bz2' not in subset_file and
                (self._hasty or '.hasty.' not in subset_file)):
                not_passing_cases.extend(
                    bz2.BZ2File(subset_file).read().splitlines())
        not_passing_cases.sort()
        return not_passing_cases

    def _get_executable(self, name):
        # Determine module from test_names or filter.
        test_prefix = name.split('.')[0]
        if test_prefix in self.DEQP_MODULES:
            module = self.DEQP_MODULES[test_prefix]
        else:
            raise error.TestFail('Failed: Invalid test name: %s' % name)

        if module == 'vk':
            executable = os.path.join(self.DEQP_BASEDIR,
                    'external/vulkancts/modules/vulkan/deqp-vk')
        else:
            executable = os.path.join(self.DEQP_BASEDIR, 'modules', module,
                                      'deqp-%s' % module)

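        # E.g. 'dEQP-GLES2.info' maps to
        # /usr/local/deqp/modules/gles2/deqp-gles2.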
        return executable

    def _can_run(self, executable):
        for binary in self._can_run_executables:
            if binary in executable:
                return True
        return False

    def _bootstrap_new_test_cases(self, test_filter):
        """Asks dEQP for all test cases and removes non-'Pass'ing ones.

        This function queries dEQP for its test cases and removes all cases
        that are not in 'Pass'ing expectations from the list. It can be used
        to incrementally update failing/hanging tests over several runs.

        @param test_filter: string like 'dEQP-GLES2.info', 'dEQP-GLES3.stress'.

        @return: List of dEQP tests to run.
        """
        test_cases = []
        executable = self._get_executable(test_filter)
        if not self._can_run(executable):
            return test_cases

        # Must be in the executable directory when running for it to find its
        # test data files!
        os.chdir(os.path.dirname(executable))

        not_passing_cases = self._load_not_passing_cases(test_filter)
        # We did not find 'Pass' expectations, so assume that everything else
        # listed in the expectation files should not be run this time.
        expectations_dir = os.path.join(self.bindir, 'expectations',
                                        self._gpu_type)
        subset_spec = '%s.*' % test_filter
        subset_paths = glob.glob(os.path.join(expectations_dir, subset_spec))
        for subset_file in subset_paths:
            # Filter against hasty failures only in hasty mode.
            if self._hasty or '.hasty.' not in subset_file:
                not_passing_cases.extend(
                    bz2.BZ2File(subset_file).read().splitlines())

        # Now ask the dEQP executable for the full list of tests. It needs to
        # be run from the executable's directory. The output is a plain text
        # file named e.g. 'dEQP-GLES2-cases.txt'.
        command = ('%s '
                   '--deqp-runmode=txt-caselist '
                   '--deqp-surface-type=%s '
                   '--deqp-gl-config-name=rgba8888d24s8ms0 ' % (executable,
                                                                self._surface))
        logging.info('Running command %s', command)
        utils.run(command,
                  env=self._env,
                  timeout=60,
                  stderr_is_expected=False,
                  ignore_status=False,
                  stdin=None)

        # Now read this caselist file.
        caselist_name = '%s-cases.txt' % test_filter.split('.')[0]
        caselist_file = os.path.join(os.path.dirname(executable), caselist_name)
        if not os.path.isfile(caselist_file):
            raise error.TestFail('Failed: No caselist file at %s!' %
                                 caselist_file)

        # And remove non-Pass'ing expectations from caselist.
        with open(caselist_file) as f:
            caselist = f.read().splitlines()
        # Contains lines like "TEST: dEQP-GLES2.capability".
        test_cases = []
        match = 'TEST: %s' % test_filter
        logging.info('Bootstrapping test cases matching "%s".', match)
        for case in caselist:
            if case.startswith(match):
                case = case.split('TEST: ')[1]
                test_cases.append(case)

        test_cases = list(set(test_cases) - set(not_passing_cases))
        if not test_cases:
            raise error.TestFail('Failed: Unable to bootstrap %s!' % test_filter)

        test_cases.sort()
        return test_cases

    def _get_test_cases(self, test_filter, subset):
        """Gets the test cases for 'Pass', 'Fail' etc. expectations.

        This function supports bootstrapping of new GPU families and dEQP
        binaries. In particular, if no 'Pass' expectations are found for
        this GPU family it will query the dEQP executable for a list of all
        available tests. It will then remove known non-'Pass'ing tests from
        this list to avoid getting into hangs/crashes etc.

        @param test_filter: string like 'dEQP-GLES2.info', 'dEQP-GLES3.stress'.
        @param subset: string from 'Pass', 'Fail', 'Timeout' etc.

        @return: List of dEQP tests to run.
        """
        expectations_dir = os.path.join(self.bindir, 'expectations',
                                        self._gpu_type)
        subset_name = '%s.%s.bz2' % (test_filter, subset)
        subset_path = os.path.join(expectations_dir, subset_name)
        if not os.path.isfile(subset_path):
            if subset == 'NotPass':
                # TODO(ihf): Running hasty and NotPass together is an invitation
                # for trouble (stability). Decide if it should be disallowed.
                return self._load_not_passing_cases(test_filter)
            if subset != 'Pass':
                raise error.TestFail('Failed: No subset file found for %s!' %
                                     subset_path)
            # Ask dEQP for all cases and remove the failing ones.
            return self._bootstrap_new_test_cases(test_filter)

        test_cases = bz2.BZ2File(subset_path).read().splitlines()
        if not test_cases:
            raise error.TestFail(
                'Failed: No test cases found in subset file %s!' % subset_path)
        return test_cases

    def run_tests_individually(self, test_cases):
        """Runs tests isolated from each other, but slowly.

        This function runs each test case as a separate command. This means
        a new context for each test etc. Failures are better isolated, but
        the runtime is quite high due to the overhead.

        @param test_cases: List of dEQP test case strings.

        @return: dictionary of test results.
        """
        test_results = {}
        width = self._width
        height = self._height

        for i, test_case in enumerate(test_cases, start=1):
            logging.info('[%d/%d] TestCase: %s', i, len(test_cases), test_case)
            result_prefix = os.path.join(self._log_path, test_case)
            log_file = '%s.log' % result_prefix
            debug_file = '%s.debug' % result_prefix
            executable = self._get_executable(test_case)
            command = ('%s '
                       '--deqp-case=%s '
                       '--deqp-surface-type=%s '
                       '--deqp-gl-config-name=rgba8888d24s8ms0 '
                       '--deqp-log-images=disable '
                       '--deqp-watchdog=enable '
                       '--deqp-surface-width=%d '
                       '--deqp-surface-height=%d '
                       '--deqp-log-filename=%s' % (executable, test_case,
                                                   self._surface, width, height,
                                                   log_file))

            if not self._can_run(executable):
                result = 'Skipped'
                logging.info('Skipping on %s: %s', self._gpu_type, test_case)
            else:
                logging.debug('Running single: %s', command)

                # Must be in the executable directory when running for it to
                # find its test data files!
                os.chdir(os.path.dirname(executable))

                # Must initialize because some errors don't repopulate
                # run_result, leaving old results.
                run_result = {}
                start_time = time.time()
                try:
                    run_result = utils.run(command,
                                           env=self._env,
                                           timeout=self._timeout,
                                           stderr_is_expected=False,
                                           ignore_status=True)
                    result_counts = self._parse_test_results(log_file)
                    if result_counts:
                        result = result_counts.keys()[0]
                    else:
                        result = 'Unknown'
                except error.CmdTimeoutError:
                    result = 'TestTimeout'
                except error.CmdError:
                    result = 'CommandFailed'
                except Exception:
                    result = 'UnexpectedError'
                end_time = time.time()

                if self._debug:
                    # Collect debug info and save to json file.
                    output_msgs = {
                        'start_time': start_time,
                        'end_time': end_time,
                        'stdout': [],
                        'stderr': [],
                        'dmesg': []
                    }
                    logs = self._log_reader.get_logs()
                    self._log_reader.set_start_by_current()
                    output_msgs['dmesg'] = [
                        msg for msg in logs.splitlines()
                        if self._log_filter.match(msg)
                    ]
                    if run_result:
                        output_msgs['stdout'] = run_result.stdout.splitlines()
                        output_msgs['stderr'] = run_result.stderr.splitlines()
                    with open(debug_file, 'w') as fd:
                        json.dump(
                            output_msgs,
                            fd,
                            indent=4,
                            separators=(',', ' : '),
                            sort_keys=True)

            logging.info('Result: %s', result)
            test_results[result] = test_results.get(result, 0) + 1

        return test_results

    def run_tests_hasty(self, test_cases):
        """Runs tests as quickly as possible.

        This function runs all the test cases, but does not isolate tests and
        may take shortcuts/not run all tests to provide maximum coverage at
        minimum runtime.

        @param test_cases: List of dEQP test case strings.

        @return: dictionary of test results.
        """
        # TODO(ihf): It saves half the test time to use 32*32 but a few tests
        # fail as they need surfaces larger than 200*200.
        width = self._width
        height = self._height
        results = {}

        # All tests combined should take less than 1h in hasty mode.
        batch_timeout = min(3600, self._timeout * self._hasty_batch_size)
        num_test_cases = len(test_cases)

        # We divide the tests into several shards, but run them in smaller
        # batches. Shards start and end at multiples of batch_size
        # boundaries.
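        # Worked example (hypothetical numbers): with 1000 tests, 4 shards
        # and batch size 100, num_test_cases / shard_count is 250 and the
        # shards cover [0, 200), [200, 500), [500, 700) and [700, 1000),
        # the last one extended below to absorb the rounding remainder.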
        shard_start = self._hasty_batch_size * (
            (self._shard_number * (num_test_cases / self._shard_count)) /
            self._hasty_batch_size)
        shard_end = self._hasty_batch_size * ((
            (self._shard_number + 1) * (num_test_cases / self._shard_count)) /
                                              self._hasty_batch_size)
        # The last shard will be slightly larger than the others. Extend it to
        # cover all test cases, avoiding rounding problems with the integer
        # arithmetic done to compute shard_start and shard_end.
        if self._shard_number + 1 == self._shard_count:
            shard_end = num_test_cases

        for batch in xrange(shard_start, shard_end, self._hasty_batch_size):
            batch_to = min(batch + self._hasty_batch_size, shard_end)
            batch_cases = '\n'.join(test_cases[batch:batch_to])
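            # The newline-separated batch is fed to the binary on stdin via
            # --deqp-stdin-caselist below.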
            # This assumes all tests in the batch are kicked off via the same
            # executable.
            executable = self._get_executable(test_cases[batch])
            command = ('%s '
                       '--deqp-stdin-caselist '
                       '--deqp-surface-type=%s '
                       '--deqp-gl-config-name=rgba8888d24s8ms0 '
                       '--deqp-log-images=disable '
                       '--deqp-visibility=hidden '
                       '--deqp-watchdog=enable '
                       '--deqp-surface-width=%d '
                       '--deqp-surface-height=%d ' % (executable, self._surface,
                                                      width, height))

            log_file = os.path.join(self._log_path,
                                    '%s_hasty_%d.log' % (self._filter, batch))

            command += '--deqp-log-filename=' + log_file

            if not self._can_run(executable):
                logging.info('Skipping tests on %s: %s', self._gpu_type,
                             batch_cases)
            else:
                logging.info('Running tests %d...%d out of %d:\n%s\n%s',
                             batch + 1, batch_to, num_test_cases, command,
                             batch_cases)

                # Must be in the executable directory when running for it to
                # find its test data files!
                os.chdir(os.path.dirname(executable))

                try:
                    utils.run(command,
                              env=self._env,
                              timeout=batch_timeout,
                              stderr_is_expected=False,
                              ignore_status=False,
                              stdin=batch_cases)
                except Exception:
                    pass
                # We are trying to handle all errors by parsing the log file.
                results = self._parse_test_results(log_file, results)
                logging.info(results)
        return results

    def run_once(self, opts=None):
        """Runs the dEQP test suite.

        @param opts: list of 'key=value' option strings, see below.
        """
        options = dict(filter='',
                       test_names='',  # e.g., dEQP-GLES3.info.version,
                                       # dEQP-GLES2.functional,
                                       # dEQP-GLES3.accuracy.texture, etc.
                       test_names_file='',
                       timeout=self._timeout,
                       subset_to_run='Pass',  # Pass, Fail, Timeout, NotPass...
                       hasty='False',
                       shard_number='0',
                       shard_count='1',
                       debug='False')
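        # Hypothetical example: opts = ['filter=dEQP-GLES2.functional',
        # 'hasty=True', 'shard_count=2'].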
        if opts is None:
            opts = []
        options.update(utils.args_to_dict(opts))
        logging.info('Test Options: %s', options)

        self._hasty = (options['hasty'] == 'True')
        self._timeout = int(options['timeout'])
        self._test_names_file = options['test_names_file']
        self._test_names = options['test_names']
        self._shard_number = int(options['shard_number'])
        self._shard_count = int(options['shard_count'])
        self._debug = (options['debug'] == 'True')
        if not (self._test_names_file or self._test_names):
            self._filter = options['filter']
            if not self._filter:
                raise error.TestFail('Failed: No dEQP test filter specified')

        # Some information to help postprocess logs into blacklists later.
        logging.info('ChromeOS BOARD = %s', self._board)
        logging.info('ChromeOS CPU family = %s', self._cpu_type)
        logging.info('ChromeOS GPU family = %s', self._gpu_type)

        # Create a place to put detailed test output logs.
        if self._filter:
            logging.info('dEQP test filter = %s', self._filter)
            self._log_path = os.path.join(tempfile.gettempdir(),
                                          '%s-logs' % self._filter)
        else:
            base = os.path.basename(self._test_names_file)
            # TODO(ihf): Clean this up.
            logging.info('dEQP test filter = %s', os.path.splitext(base)[0])
            self._log_path = os.path.join(tempfile.gettempdir(),
                                          '%s-logs' % base)
        shutil.rmtree(self._log_path, ignore_errors=True)
        os.mkdir(self._log_path)

        self._services.stop_services()
        if self._test_names_file:
            with open(os.path.join(self.bindir, self._test_names_file)) as f:
                test_cases = [line.rstrip('\n') for line in f]
            test_cases = [
                test for test in test_cases if test and not test.isspace()
            ]
        if self._test_names:
            test_cases = []
            for name in self._test_names.split(','):
                test_cases.extend(self._get_test_cases(name, 'Pass'))
        if self._filter:
            test_cases = self._get_test_cases(self._filter,
                                              options['subset_to_run'])

        if self._debug:
            # LogReader works on /var/log/messages by default.
            self._log_reader = cros_logging.LogReader()
            self._log_reader.set_start_by_current()

        test_results = {}
        if self._hasty:
            logging.info('Running in hasty mode.')
            test_results = self.run_tests_hasty(test_cases)
        else:
            logging.info('Running each test individually.')
            test_results = self.run_tests_individually(test_cases)

        logging.info('Test results:')
        logging.info(test_results)
        self.write_perf_keyval(test_results)

        test_count = 0
        test_failures = 0
        test_passes = 0
        test_skipped = 0
        for result in test_results:
            test_count += test_results[result]
            if result.lower() in ['pass']:
                test_passes += test_results[result]
            if result.lower() not in [
                    'pass', 'notsupported', 'internalerror', 'qualitywarning',
                    'compatibilitywarning', 'skipped'
            ]:
                test_failures += test_results[result]
            if result.lower() in ['skipped']:
                test_skipped += test_results[result]
        # The text "Completed all tests." is used by the process_log.py script
        # and should always appear at the end of a completed test run.
        logging.info(
            'Completed all tests. Saw %d tests, %d passes and %d failures.',
            test_count, test_passes, test_failures)

        if self._filter and test_count == 0 and options[
                'subset_to_run'] != 'NotPass':
            logging.warning('No test cases found for filter: %s!', self._filter)

        if options['subset_to_run'] == 'NotPass':
            if test_passes:
                # TODO(ihf): Make this an annotated TestPass once available.
                raise error.TestWarn(
                    '%d formerly failing tests are passing now.' % test_passes)
        elif test_failures:
            # TODO(ihf): Delete this once hasty expectations have been
            # checked in.
            if self._gpu_type.startswith('tegra'):
                raise error.TestWarn(
                    'Failed: on %s %d/%d tests failed.' %
                    (self._gpu_type, test_failures, test_count))
            raise error.TestFail('Failed: on %s %d/%d tests failed.' %
                                 (self._gpu_type, test_failures, test_count))
        if test_skipped > 0:
            raise error.TestFail('Failed: on %s %d tests skipped, %d passes' %
                                 (self._gpu_type, test_skipped, test_passes))