#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Prepare tests that require re-baselining for input to make_expectations.py.

The regularly running perf-AV tests require re-baselining of expectations
about once a week. The steps involved in re-baselining are:

1.) Identify the tests to update, based on reported e-mail results.
2.) Figure out reva and revb values, which are the starting and ending revision
 numbers of the range that we should use to obtain new thresholds.
3.) Modify lines in perf_expectations.json referring to the tests to be updated,
 so that they may be used as input to make_expectations.py.

This script automates the last step above.

Here's a sample line from perf_expectations.json:

"win-release/media_tests_av_perf/fps/tulip2.m4a": {"reva": 163299, \
"revb": 164141, "type": "absolute", "better": "higher", "improve": 0, \
"regress": 0, "sha1": "54d94538"},

To get the above test ready for input to make_expectations.py, it should become:

"win-release/media_tests_av_perf/fps/tulip2.m4a": {"reva": <new reva>, \
"revb": <new revb>, "type": "absolute", "better": "higher", "improve": 0, \
"regress": 0},

Examples:

1.) To update the test specified above and get baseline values using the
revision range 12345 to 23456, run this script with a command line like this:
  python update_perf_expectations.py -f \
  win-release/media_tests_av_perf/fps/tulip2.m4a --reva 12345 --revb 23456
Or, using an input file that contains a single line with the text
  win-release/media_tests_av_perf/fps/tulip2.m4a
run with this command line:
  python update_perf_expectations.py -i input.txt --reva 12345 --revb 23456

2.) Let's say you want to update all seek tests on Windows, and get baseline
values using the revision range 12345 to 23456.
Run this script with this command line:
  python update_perf_expectations.py -f win-release/media_tests_av_perf/seek/ \
   --reva 12345 --revb 23456
Or:
  python update_perf_expectations.py -f win-release/.*/seek/ --reva 12345 \
  --revb 23456

Or, using an input file that contains a single line with the text
win-release/.*/seek/:
  python update_perf_expectations.py -i input.txt --reva 12345 --revb 23456

3.) Similarly, to update seek tests on all platforms:
  python update_perf_expectations.py -f .*-release/.*/seek/ --reva 12345 \
  --revb 23456

"""

import logging
from optparse import OptionParser
import os
import re

import make_expectations as perf_ex_lib

# Default logging is INFO. Use -v to enable DEBUG logging.
_DEFAULT_LOG_LEVEL = logging.INFO


def GetTestsToUpdate(contents, all_test_keys):
  """Parses input contents and obtains tests to be re-baselined.

  Args:
    contents: string containing contents of input file.
    all_test_keys: list of keys of test dictionary.
  Returns:
    A list of keys for tests that should be updated.
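  Example (illustrative test keys, loosely based on the module docstring):
    GetTestsToUpdate('win-release/.*/seek/\n',
                     ['win-release/media_tests_av_perf/seek/crowd1080.ogv',
                      'win-release/media_tests_av_perf/fps/tulip2.m4a'])
    returns ['win-release/media_tests_av_perf/seek/crowd1080.ogv'].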
  """
  # Each line of the input file specifies a test case to update.
  tests_list = []
  for test_case_filter in contents.splitlines():
    # Skip any empty lines.
    if test_case_filter:
      # Sample expected line:
      # win-release/media_tests_av_perf/seek/\
      # CACHED_BUFFERED_SEEK_NoConstraints_crowd1080.ogv
      # Or, if reg-ex, then sample line:
      # win-release/media_tests_av_perf/seek*
      # Strip any leading/trailing spaces that may exist in the input file.
      logging.debug('Trying to match %s', test_case_filter)
      tests_list.extend(GetMatchingTests(test_case_filter.strip(),
                                         all_test_keys))
  return tests_list


def GetMatchingTests(tests_to_update, all_test_keys):
  """Parses input reg-ex filter and obtains tests to be re-baselined.

  Args:
    tests_to_update: reg-ex string specifying tests to be updated.
    all_test_keys: list of keys of tests dictionary.
  Returns:
    A list of keys for tests that should be updated.
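  Example (hypothetical keys, for illustration only; a literal prefix works
  as well as a reg-ex):
    GetMatchingTests('win-release/media_tests_av_perf/fps/',
                     ['win-release/media_tests_av_perf/fps/tulip2.m4a',
                      'win-release/media_tests_av_perf/seek/crowd1080.ogv'])
    returns ['win-release/media_tests_av_perf/fps/tulip2.m4a'].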
  """
  tests_list = []
  search_string = re.compile(tests_to_update)
  # Get matching tests from the dictionary of tests.
  for test_key in all_test_keys:
    if search_string.match(test_key):
      tests_list.append(test_key)
      logging.debug('%s will be updated', test_key)
  logging.info('%s tests found matching reg-ex: %s', len(tests_list),
               tests_to_update)
  return tests_list


def PrepareTestsForUpdate(tests_to_update, all_tests, reva, revb):
  """Modifies values of tests that are to be re-baselined:
     Sets reva and revb to the specified new values. Removes sha1.

  Args:
    tests_to_update: list of tests to be updated.
    all_tests: dictionary of all tests.
    reva: oldest revision in range to use for new values.
    revb: newest revision in range to use for new values.
  Raises:
    ValueError: If reva or revb is not a valid int, or if either
    of them is negative.
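  Example (illustrative values taken from the module docstring): an entry
  such as
    {"reva": 163299, "revb": 164141, "type": "absolute", "better": "higher",
     "improve": 0, "regress": 0, "sha1": "54d94538"}
  becomes, after PrepareTestsForUpdate(..., reva=12345, revb=23456),
    {"reva": 12345, "revb": 23456, "type": "absolute", "better": "higher",
     "improve": 0, "regress": 0}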
  """
  reva = int(reva)
  revb = int(revb)

  if reva < 0 or revb < 0:
    raise ValueError('Revision values should be positive.')
  # Ensure reva is less than revb.
  # (This is similar to the check done in make_expectations.py.)
  if revb < reva:
    reva, revb = revb, reva
  for test_key in tests_to_update:
    # Get the original test from the dictionary of tests.
    test_value = all_tests[test_key]
    if test_value:
      # Sample line in perf_expectations.json:
      #  "linux-release/media_tests_av_perf/dropped_frames/crowd360.webm":\
      # {"reva": 155180, "revb": 155280, "type": "absolute", \
      # "better": "lower", "improve": 0, "regress": 3, "sha1": "276ba29c"},
      # Set the new revision range.
      test_value['reva'] = reva
      test_value['revb'] = revb
      # Remove sha1 to indicate this test requires an update.
      # Check first to make sure it exists.
      if 'sha1' in test_value:
        del test_value['sha1']
    else:
      logging.warning('%s does not exist.', test_key)
  logging.info('Done preparing tests for update.')


def GetCommandLineOptions():
  """Parse command line arguments.

  Returns:
    An options object containing command line arguments and their values.
  """
  parser = OptionParser()

  parser.add_option('--reva', dest='reva', type='int',
                    help='Starting revision of new range.',
                    metavar='START_REVISION')
  parser.add_option('--revb', dest='revb', type='int',
                    help='Ending revision of new range.',
                    metavar='END_REVISION')
  parser.add_option('-f', dest='tests_filter',
                    help='Regex to use for filtering tests to be updated. '
                    'Exactly one of -f or -i must be provided.',
                    metavar='FILTER', default='')
  parser.add_option('-i', dest='input_file',
                    help='Path to a file with reg-exes for tests to update. '
                    'Exactly one of -f or -i must be provided.',
                    metavar='INPUT_FILE', default='')
  parser.add_option('--config', dest='config_file',
                    default=perf_ex_lib.DEFAULT_CONFIG_FILE,
                    help='Set the config file to FILE.', metavar='FILE')
  parser.add_option('-v', dest='verbose', action='store_true', default=False,
                    help='Enable verbose output.')
  options = parser.parse_args()[0]
  return options


def Main():
  """Main driver function."""
  options = GetCommandLineOptions()

  _SetLogger(options.verbose)
  # Do some command-line validation.
  if not options.input_file and not options.tests_filter:
    logging.error('At least one of -i (input file) or -f (test filter) '
                  'must be provided.')
    exit(1)
  if options.input_file and options.tests_filter:
    logging.error('Specify only one of -i (input file) or -f (test filter).')
    exit(1)
  if not options.reva or not options.revb:
    logging.error('Start and end revision of range must be specified.')
    exit(1)

  # Load config.
  config = perf_ex_lib.ConvertJsonIntoDict(
      perf_ex_lib.ReadFile(options.config_file))

  # Obtain the perf expectations file from the config file.
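  # For example, if the config file maps 'perf_file' to
  # 'perf_expectations.json' (the file referenced in the module docstring),
  # perf_file resolves to that file in the config file's directory.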
  perf_file = os.path.join(
      os.path.dirname(options.config_file), config['perf_file'])

  # We should have all the information we require now.
  # On to the real work.
  # First, get all the existing tests from the original perf_expectations file.
  all_tests = perf_ex_lib.ConvertJsonIntoDict(
      perf_ex_lib.ReadFile(perf_file))
  all_test_keys = all_tests.keys()
  # Remove the load key, because we don't want to modify it.
  all_test_keys.remove('load')
  # Keep tests sorted, like in the original file.
  all_test_keys.sort()

  # Next, get all tests that have been identified for an update.
  if options.input_file:
    # Tests to update have been specified in an input file.
    # Get contents of the file.
    tests_filter = perf_ex_lib.ReadFile(options.input_file)
  else:
    # Tests to update have been specified as a reg-ex filter.
    tests_filter = options.tests_filter

  # Get tests to update based on the specified filter.
  tests_to_update = GetTestsToUpdate(tests_filter, all_test_keys)
  logging.info('Done obtaining matching tests.')

  # Now, prepare tests for update.
  PrepareTestsForUpdate(tests_to_update, all_tests, options.reva, options.revb)

  # Finally, write modified tests back to perf_expectations file.
  perf_ex_lib.WriteJson(perf_file, all_tests, all_test_keys,
                        calculate_sha1=False)
  logging.info('Done writing tests for update to %s.', perf_file)


def _SetLogger(verbose):
  """Sets the log level to DEBUG if verbose is set, else the default."""
  log_level = _DEFAULT_LOG_LEVEL
  if verbose:
    log_level = logging.DEBUG
  logging.basicConfig(level=log_level, format='%(message)s')


if __name__ == '__main__':
  Main()