#!/usr/bin/python2
#
# Copyright 2010 Google Inc. All Rights Reserved.
      4 """Script to compare ChromeOS benchmarks
      5 
      6 Inputs:
      7     <perflab-output directory 1 - baseline>
      8     <perflab-output directory 2 - results>
      9     --csv - comma separated results
     10 
     11 This script doesn't really know much about benchmarks. It simply looks for
     12 similarly names directories and a results.txt file, and compares
     13 the results and presents it, along with a geometric mean.
     14 
     15 """

from __future__ import print_function

__author__ = 'bjanakiraman@google.com (Bhaskar Janakiraman)'

import argparse
import glob
import math
import re
import sys

from cros_utils import command_executer

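# Glob pattern, relative to a perflab output directory, that locates each
# benchmark's results.txt file; the '%s' is filled in with that directory.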
BENCHDIRS = ('%s/default/default/*/gcc-4.4.3-glibc-2.11.1-grte-k8-opt/ref/*'
             '/results.txt')

# Common initializations
cmd_executer = command_executer.GetCommandExecuter()


def Usage(parser, message):
  """Print an error message plus usage help, then exit."""
  print('ERROR: %s' % message)
  parser.print_help()
  sys.exit(1)


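# For a results.txt containing the sample METRIC line shown inside GetStats()
# below, the parsed result would be {'TotalTime_ms': 1524.4}; the
# isolated_walltime entry is deliberately skipped.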
def GetStats(in_file):
  """Return a dict of {metric_name: value} parsed from a results.txt file."""
  pairs = []
  with open(in_file, 'r') as f:
    for l in f:
      line = l.strip()
      # Look for match lines like the following:
      #       METRIC isolated TotalTime_ms (down, scalar) trial_run_0: ['1524.4']
      #       METRIC isolated isolated_walltime (down, scalar) trial_run_0: \
      #         ['167.407445192']
      m = re.match(r"METRIC\s+isolated\s+(\S+).*\['(\d+(?:\.\d+)?)'\]", line)
      if not m:
        continue
      metric = m.group(1)
      # Skip the overall walltime entry; only individual metrics are compared.
      if re.match(r'isolated_walltime', metric):
        continue

      value = float(m.group(2))
      pairs.append((metric, value))

  return dict(pairs)


def PrintDash(n):
  """Print a row of n dashes."""
  print('-' * n)


def PrintHeaderCSV(hdr):
  """Print the header fields as a single comma-separated line."""
  print(','.join(hdr))


def PrintHeader(hdr):
  """Print the header fields as fixed-width columns between dashed rules."""
  tot_len = len(hdr)
  PrintDash(tot_len * 15)

  tmpstr = ''
  for field in hdr:
    tmpstr += '%15.15s' % field

  print(tmpstr)
  PrintDash(tot_len * 15)


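# Main() prints one table per benchmark: each comparable metric with its
# baseline value, result value and percentage speed-up, followed by a
# 'Geomean' summary row.  With --csv the same rows are emitted as CSV instead
# of fixed-width columns.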
def Main(argv):
  """Compare Benchmarks."""
  # Common initializations

  parser = argparse.ArgumentParser()
  parser.add_argument('-c',
                      '--csv',
                      dest='csv_output',
                      action='store_true',
                      default=False,
                      help='Output in csv form.')
  parser.add_argument('args', nargs='+', help='positional arguments: '
                      '<baseline-output-dir> <results-output-dir>')

  options = parser.parse_args(argv[1:])

  # Validate args.
  if len(options.args) != 2:
    Usage(parser, 'Needs <baseline output dir> <results output dir>')

  base_dir = options.args[0]
  res_dir = options.args[1]

  # Find benchmarks in each tree that produced a results.txt.
  resbenches_glob = BENCHDIRS % res_dir
  resbenches = glob.glob(resbenches_glob)

  basebenches_glob = BENCHDIRS % base_dir
  basebenches = glob.glob(basebenches_glob)

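  # Pair each results file with the baseline file at the same relative path;
  # benchmarks that only exist in one of the two trees are silently skipped.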
  to_compare = []
  for resbench in resbenches:
    tmp = resbench.replace(res_dir, base_dir, 1)
    if tmp in basebenches:
      to_compare.append((resbench, tmp))

  for (resfile, basefile) in to_compare:
    stats = GetStats(resfile)
    basestats = GetStats(basefile)
    # Print a header:
    #   benchname (with results.txt stripped), basetime, restime, %speed-up.
    benchname = resfile.split('/')[-2]
    benchname = benchname.replace('chromeos__', '', 1)
    hdr = [benchname, 'basetime', 'restime', '%speed up']
    if options.csv_output:
      PrintHeaderCSV(hdr)
    else:
      PrintHeader(hdr)

    # For geomean computations.
    prod = 1.0
    count = 0
    for key in stats:
      if key in basestats:
        # Ignore very small values.
        if stats[key] < 0.01:
          continue
        count = count + 1
        prod = prod * (stats[key] / basestats[key])
        speedup = (basestats[key] - stats[key]) / basestats[key]
        speedup = speedup * 100.0
        if options.csv_output:
          print('%s,%f,%f,%f' % (key, basestats[key], stats[key], speedup))
        else:
          print('%15.15s%15.2f%15.2f%14.2f%%' % (key, basestats[key],
                                                 stats[key], speedup))

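    # Geometric mean of the result/baseline ratios across all compared metrics:
    #   geomean = exp(mean(ln(res/base)))
    # reported as (1 - geomean) * 100, so positive numbers mean the results run
    # got faster (the metrics are times, lower is better).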
    # Guard against the case where no metrics were comparable.
    if count:
      prod = math.exp(1.0 / count * math.log(prod))
      prod = (1.0 - prod) * 100
    else:
      prod = 0.0
    if options.csv_output:
      print('%s,,,%f' % ('Geomean', prod))
    else:
      print('%15.15s%15.15s%15.15s%14.2f%%' % ('Geomean', '', '', prod))
      print('')
  return 0


if __name__ == '__main__':
  retval = Main(sys.argv)
  sys.exit(retval)