#  tko/nightly.py  code shared by various tko/*.cgi graphing scripts

import cgi, cgitb
import os, sys
import common
from autotest_lib.tko import db, plotgraph, perf
from autotest_lib.client.common_lib import kernel_versions


def add_kernel_jobs(label_pattern):
    # let the db driver quote the pattern rather than splicing it into sql
    cmd = "select job_idx from tko_jobs where label like %s"
    nrows = perf.db_cur.execute(cmd, [label_pattern])
    return [row[0] for row in perf.db_cur.fetchall()]
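
# Usage sketch (hypothetical label pattern; nightly job labels are
# site-specific):
#     jobs = add_kernel_jobs('regression-nightly-%')
#     # -> e.g. [1401, 1402, ...], job_idx values from tko_jobs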


def is_filtered_platform(platform, platforms_filter):
    if not platforms_filter:
        return True
    for p in platforms_filter:
        if platform.startswith(p):
            return True
    return False


def get_test_attributes(testrunx):
    cmd = ( "select attribute, value from tko_test_attributes"
            " where test_idx = %d" % testrunx )
    nrows = perf.db_cur.execute(cmd)
    return dict(perf.db_cur.fetchall())


def get_antag(testrunx):
    attrs = get_test_attributes(testrunx)
    return attrs.get('antag', None)


def matching_test_attributes(attrs, required_test_attributes):
    if not required_test_attributes:
        return True
    # True only if all jointly-existing keys match
    return all(attrs[key] == required_test_attributes[key]
               for key in attrs if key in required_test_attributes)
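
# Example (hypothetical attribute values): keys present on only one side
# are ignored; only jointly-present keys must agree.
#     matching_test_attributes({'numa_fake': '128M', 'foo': '1'},
#                              {'numa_fake': '128M'})   # -> True
#     matching_test_attributes({'numa_fake': '64M'},
#                              {'numa_fake': '128M'})   # -> False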


def collect_testruns(jobs, test, test_attributes,
                         platforms_filter, by_hosts, no_antag):
    # get test_runs run #s for 1 test on 1 kernel and some platforms
    # TODO: Is jobs list short enough to use directly in 1 sql cmd?
    # TODO: add filtering on test series?
    runs = {}   # platform --> list of test runs
    for jobx in jobs:
        cmd = ( "select test_idx, machine_idx from tko_tests"
                " where job_idx = %s and test = %s" )
        args = [jobx, test]
        nrows = perf.db_cur.execute(cmd, args)
        for testrunx, machx in perf.db_cur.fetchall():
            platform, host = perf.machine_idx_to_platform_host(machx)
            if by_hosts:
                platform += '.'+host
            if ( is_filtered_platform(platform, platforms_filter)  and
                 matching_test_attributes(get_test_attributes(testrunx),
                                          test_attributes) and
                 (not no_antag or get_antag(testrunx) == '') ):
                runs.setdefault(platform, []).append(testrunx)
    return runs


def all_tested_platforms(test_runs):
    # extract list of all tested platforms from test_runs table
    platforms = set()
    for kernel in test_runs:
        platforms.update(set(test_runs[kernel].keys()))
    return sorted(platforms)


def divide_twoway_testruns(test_runs, platform):
    # partition all twoway runs based on name of antagonist progs
    twoway_runs = {}
    antagonists = set()
    for kernel in test_runs:
        runs = {}
        for testrunx in test_runs[kernel].get(platform, []):
            antag = get_antag(testrunx)
            if antag is not None:
                runs.setdefault(antag, []).append(testrunx)
                antagonists.add(antag)
        twoway_runs[kernel] = runs
    return twoway_runs, sorted(antagonists)
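
# Shape sketch of the returned values (hypothetical run indexes;
# '' names the solo, no-antagonist baseline runs):
#     twoway_runs = {'2.6.26-smp': {'': [101, 102],
#                                   'kernbench': [103]}}
#     antagonists = ['', 'kernbench']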


def collect_raw_scores(runs, metric):
    # get unscaled scores of test runs for 1 test on certain jobs
    #   arrange them by platform type
    platform_scores = {}  # platform --> list of perf scores
    for platform in runs:
        vals = perf.get_metric_at_point(runs[platform], metric)
        if vals:
            platform_scores[platform] = vals
    return platform_scores


def collect_scaled_scores(metric, test_runs, regressed_platforms, relative):
    # get scores of test runs for 1 test on some kernels and platforms
    # optionally make scores relative to the first kernel (in sorted
    #   order) with data on that platform
    # arrange by plotline (i.e. platform) for gnuplot
    plot_data = {}  # platform --> (kernel --> list of perf scores)
    baseline = {}
    for kernel in sorted(test_runs.keys()):
        for platform in test_runs[kernel]:
            if not (regressed_platforms is None or
                    platform in regressed_platforms):
                continue  # skip results for uninteresting platforms
            vals = perf.get_metric_at_point(test_runs[kernel][platform],
                                            metric)
            if vals:
                if relative:
                    if platform not in baseline:
                        baseline[platform], std = plotgraph.avg_dev(vals)
                    vals = [v/baseline[platform] for v in vals]
                pdp = plot_data.setdefault(platform, {})
                pdp.setdefault(kernel, []).extend(vals)
    return plot_data
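
# Resulting plot_data shape (hypothetical platform name and numbers), one
# plotted line per platform, one X column per kernel:
#     {'ipaq': {'2.6.25': [1.0, 0.99], '2.6.26': [0.97]}}
# With relative=True each score is divided by the platform's baseline
# average, so the baseline kernel plots at ~1.0.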


def collect_twoway_scores(metric, antagonists, twoway_runs, relative):
    alone = ''  # antagonist name of the solo, no-antagonist baseline runs
    plot_data = {}
    for kernel in twoway_runs:
        for test2 in antagonists:
            runs = twoway_runs[kernel].get(test2, [])
            vals = perf.get_metric_at_point(runs, metric)
            plot_data.setdefault(test2, {})
            if vals:
                plot_data[test2][kernel] = vals
        if relative:
            vals = plot_data.get(alone, {}).get(kernel, [])
            if vals:
                baseline = perf.average(vals)
                for test2 in antagonists:
                    vals = plot_data[test2].get(kernel, [])
                    vals = [val/baseline for val in vals]
                    if vals:
                        plot_data[test2][kernel] = vals
            else:
                # no solo baseline for this kernel; drop its data points
                for test2 in antagonists:
                    if kernel in plot_data[test2]:
                        del plot_data[test2][kernel]
    return plot_data


def find_regressions(kernels, test_runs, metric):
    # A test is regressed on some platform if its latest results are
    #  definitely lower than on the reference kernel.
    # Runs for the latest kernel may be underway and incomplete.
    # In that case, selectively use next-latest kernel.
    # TODO: the next-latest method hurts if latest run is not sorted last,
    #       or if there are several dev threads
    ref    = kernels[0]
    latest = kernels[-1]
    prev   = kernels[-2:][0]  # next-latest, or the sole kernel if only one
    scores = {}  #  kernel --> (platform --> list of perf scores)
    for k in [ref, prev, latest]:
        if k in test_runs:
            scores[k] = collect_raw_scores(test_runs[k], metric)
    regressed_platforms = []
    for platform in scores[ref]:
        if latest in scores and platform in scores[latest]:
            k = latest
        elif prev in scores and platform in scores[prev]:
            k = prev
        else:  # perhaps due to decay of test machines
            k = ref  # no regression info available
        ref_avg, ref_std = plotgraph.avg_dev(scores[ref][platform])
        avg,     std     = plotgraph.avg_dev(scores[ k ][platform])
        if avg+std < ref_avg-ref_std:
            regressed_platforms.append(platform)
    return sorted(regressed_platforms)
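
# The regression rule, numerically (hypothetical scores): with a reference
# average of 100 and std dev of 2, a platform is flagged only when
# avg+std < 98.  So avg 95, std 2 gives 97 < 98, regressed; avg 97, std 2
# gives 99 >= 98, not regressed.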


def get_testrun_context(testrun):
    cmd = ( 'select tko_jobs.label, tko_jobs.tag, tko_tests.subdir,'
            ' tko_tests.started_time'
            ' from tko_jobs, tko_tests'
            ' where tko_jobs.job_idx = tko_tests.job_idx'
            ' and tko_tests.test_idx = %d' % testrun )
    nrows = perf.db_cur.execute(cmd)
    assert nrows == 1
    row = perf.db_cur.fetchone()
    row = [row[0], row[1], row[2], row[3].strftime('%m/%d/%y %H:%M')]
    return row


def html_top():
    print "Content-Type: text/html\n\n<html><body>"


def abs_rel_link(myurl, passthru):
    # link redraws current page with opposite absolute/relative choice
    mod_passthru = passthru[:]
    if 'absolute' in passthru:
        mod_passthru.remove('absolute')
        opposite = 'relative'
    else:
        mod_passthru.append('absolute')
        opposite = 'absolute'
    url = '%s?%s' % (myurl, '&'.join(mod_passthru))
    return "<a href='%s'> %s </a>" % (url, opposite)
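
# Example (hypothetical script name):
#     abs_rel_link('compare.cgi', ['test=kernbench', 'absolute'])
#     # -> "<a href='compare.cgi?test=kernbench'> relative </a>"
# i.e. 'absolute' is dropped when present, appended when absent.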


def table_1_metric_all_kernels(plot_data, columns, column_argname,
                               kernels, kernel_dates,
                               myurl, filtered_passthru):
    # generate html table of graph's numbers
    #   for 1 benchmark metric over all kernels (rows),
    #   over various platforms or various antagonists etc (cols).
    ref_thresholds = {}
    print "<table border=1 cellpadding=3 cellspacing=0>"
    print "<tr> <td><b> Kernel </b></td>",
    for label in columns:
        if not label and column_argname == 'antag':
            label = 'no antag'
        print "<td><b>", label.replace('_', '<br>_'), "</b></td>"
    print "</tr>"
    for kernel in kernels:
        print "<tr> <td><b>", kernel, "</b>",
        if kernel in kernel_dates:
            print "<br><small>", kernel_dates[kernel], "</small>"
        print "</td>"
        for col in columns:
            print "<td",
            vals = plot_data[col].get(kernel, [])
            if not vals:
                print "> ?",
            else:
                (avg, std_dev) = plotgraph.avg_dev(vals)
                if col not in ref_thresholds:
                    ref_thresholds[col] = avg - std_dev
                if avg+std_dev < ref_thresholds[col]:
                    print "bgcolor=pink",
                print "> ",
                args = filtered_passthru[:]
                perf.append_cgi_args(args,
                   {column_argname:col, 'kernel':kernel})
                print "<a href='%s?%s&runs&attrs'>" % (myurl,
                                                       '&'.join(args))
                print "<b>%.4g</b>" % avg, "</a><br>",
                print "&nbsp; <small> %dr   </small>" % len(vals),
                print "&nbsp; <small> %.3g </small>" % std_dev,
            print "</td>"
        print "</tr>\n"
    print "</table>"
    print "<p> <b>Bold value:</b> Average of this metric, then <br>"
    print "number of good test runs, then standard deviation of those runs"
    print "<br> Pink if regressed from reference kernel"


def table_all_metrics_1_platform(test_runs, platform, relative):
    # TODO: show std dev in cells
    #       can't mark regressions, since some metrics improve downwards
    kernels = perf.sort_kernels(test_runs.keys())
    scores = {}
    attrs = set()
    for kernel in kernels:
        testruns = test_runs[kernel].get(platform, [])
        if testruns:
            d = perf.collect_all_metrics_scores(testruns)
            scores[kernel] = d
            attrs.update(set(d.keys()))
        else:
            print "No runs completed on", kernel, "<br>"
    attrs = sorted(list(attrs))[:100]

    print "<table border=1 cellpadding=4 cellspacing=0>"
    print "<tr><td> Metric </td>"
    for kernel in kernels:
        kernel = kernel.replace("_", "_<br>")
        print "<td>", kernel, "</td>"
    print "</tr>"
    for attr in attrs:
        print "<tr>"
        print "<td>", attr, "</td>"
        baseline = None
        for kernel in kernels:
            print "<td>",
            if kernel in scores and attr in scores[kernel]:
                (avg, dev) = plotgraph.avg_dev(scores[kernel][attr])
                if baseline and relative:
                    percent = (avg/baseline - 1)*100
                    print "%+.1f%%" % percent,
                else:
                    baseline = avg
                    print "%.4g" % avg,
            else:
                print "?"
            print "</td>"
        print "</tr>"
    print "</table>"


def table_variants_all_tests(plot_data, columns, colkeys, benchmarks,
                             myurl, filtered_passthru):
    # generate html table of graph's numbers
    #   for primary metric over all benchmarks (rows),
    #   on one platform and one kernel,
    #   over various combos of test run attribute constraints (cols).
    ref_thresholds = {}
    print "<table border=1 cellpadding=3 cellspacing=0>"
    print "<tr> <td><b> Benchmark </b></td>",
    for col in columns:
        print "<td><b>", colkeys[col].replace(',', ',<br>'), "</b></td>"
    print "</tr>"
    for benchmark in benchmarks:
        print "<tr> <td><b>", benchmark, "</b></td>"
        for col in columns:
            print "<td>",
            vals = plot_data[col].get(benchmark, [])
            if not vals:
                print "?",
            else:
                (avg, std_dev) = plotgraph.avg_dev(vals)
                args = filtered_passthru[:]
                perf.append_cgi_args(args, {'test':benchmark})
                for keyval in colkeys[col].split(','):
                    key, val = keyval.split('=', 1)
                    perf.append_cgi_args(args, {key:val})
                print "<a href='%s?%s&runs&attrs'>" % (myurl,
                                                       '&'.join(args))
                print "<b>%.4g</b>" % avg, "</a><br>",
                print "&nbsp; <small> %dr   </small>" % len(vals),
                print "&nbsp; <small> %.3g </small>" % std_dev,
            print "</td>"
        print "</tr>\n"
    print "</table>"
    print "<p> <b>Bold value:</b> Average of this metric, then <br>"
    print "number of good test runs, then standard deviation of those runs"


def table_testrun_details(runs, metric, tko_server, show_attrs):
    print "<table border=1 cellpadding=4 cellspacing=0>"
    print "<tr><td> %s metric </td>" % metric
    print "<td> Job label </td> <td> Job tag </td> <td> Run results </td>"
    print "<td> Started_time </td>"
    if show_attrs:
        print "<td> Test attributes </td>"
    print "</tr>\n"

    for testrunx in runs:
        print "<tr> <td>",
        vals = perf.get_metric_at_point([testrunx], metric)
        for v in vals:
            print "%.4g&nbsp;" % v,
        print "</td>"
        row = get_testrun_context(testrunx)
        row[2] = ( "<a href='//%s/results/%s/%s/results/keyval'> %s </a>"
                   % (tko_server, row[1], row[2], row[2]) )
        for v in row:
            print "<td> %s </td>" % v
        if show_attrs:
            attrs = get_test_attributes(testrunx)
            print "<td>",
            for attr in sorted(attrs.keys()):
                if attr == "sysinfo-cmdline": continue
                if attr.startswith("svs-"): continue
                val = attrs[attr]
                if len(val) > 40:
                    val = val[:40-3] + "..."
                print "%s=%s &nbsp; &nbsp; " % (attr, val)
            print "</td>"
        print "</tr>\n"
    print "</table>"


def overview_thumb(test, metric, myurl, passthru):
    pass_ = passthru + ['test=%s' % test]
    if metric:
        pass_ += ['metric=%s' % metric]
    pass_ = '&'.join(pass_)
    print "<a    href='%s?%s&table'>"             % (myurl, pass_)
    print "  <img src='%s?%s&size=450,500'> </a>" % (myurl, pass_)
    # embedded graphs fit 3 across on 1400x1050 laptop


def graph_1_test(title, metric, plot_data, line_argname, lines,
                 kernel_legend, relative, size, dark=False):
    # generate graph image for one benchmark, showing avg and
    #  std dev of one metric, over various kernels (X columns),
    #  over various platforms or antagonists etc (graphed lines)
    xlegend = kernel_legend
    ylegend = metric.capitalize()
    if relative:
        ylegend += ', Relative'
        ymin = 0.8
    else:
        ymin = None
    if len(lines) > 1:
        keytitle = line_argname.capitalize() + ':'
    else:
        keytitle = ''
    graph = plotgraph.gnuplot(title, xlegend, ylegend, size=size,
                              xsort=perf.sort_kernels, keytitle=keytitle)
    for line in lines:
        label = line
        if not label and line_argname == 'antag':
            label = 'no antag'
        graph.add_dataset(label, plot_data[line])
    graph.plot(cgi_header=True, ymin=ymin, dark=dark)


def graph_variants_all_tests(title, plot_data, linekeys, size, dark):
    # generate graph image showing all benchmarks
    #   on one platform and one kernel,
    #   over various combos of test run attribute constraints (lines).
    xlegend = "Benchmark"
    ylegend = "Relative Perf"
    graph = plotgraph.gnuplot(title, xlegend, ylegend, size=size)
    for i in linekeys:
        graph.add_dataset(linekeys[i], plot_data[i])
    graph.plot(cgi_header=True, dark=dark, ymin=0.8)


class generate_views(object):


    def __init__(self, kernel_legend, benchmarks, test_group,
                     site_benchmark_metrics, tko_server,
                     jobs_selector, no_antag):
        self.kernel_legend = kernel_legend
        self.benchmarks = benchmarks
        self.test_group = test_group
        self.tko_server = tko_server
        self.jobs_selector = jobs_selector
        self.no_antag = no_antag

        cgitb.enable()
        test, antagonists = self.parse_most_cgi_args()

        perf.init(tko_server=tko_server)
        for b in site_benchmark_metrics:
            perf.add_benchmark_main_metric(b, site_benchmark_metrics[b])

        self.test_runs = {}     # kernel --> (platform --> list of test runs)
        self.job_table = {}     # kernel id --> list of job idxs
        self.kernel_dates = {}  # kernel id --> date of nightly test

        vary = self.cgiform.getlist('vary')
        if vary:
            platform = self.platforms_filter[0]
            self.analyze_variants_all_tests_1_platform(platform, vary)
        elif test:
            self.analyze_1_test(test, antagonists)
        else:
            self.overview_page_all_tests(self.benchmarks, antagonists)


    def collect_all_testruns(self, trimmed_kernels, test):
        # get test_runs run #s for 1 test on some kernels and platforms
        for kernel in trimmed_kernels:
            runs = collect_testruns(self.job_table[kernel], test,
                                    self.test_attributes, self.platforms_filter,
                                    'by_hosts' in self.toggles, self.no_antag)
            if runs:
                self.test_runs[kernel] = runs


    def table_for_graph_1_test(self, title, metric, plot_data,
                                 column_argname, columns, filtered_passthru):
        # generate detailed html page with 1 graph and corresponding numbers
        #   for 1 benchmark metric over all kernels (rows),
        #   over various platforms or various antagonists etc (cols).
        html_top()
        print '<h3> %s </h3>' % title
        print ('%s, machine group %s on //%s server <br>' %
               (self.kernel_legend, self.test_group, self.tko_server))
        if self.test_tag:
            print '%s test script series <br>' % self.test_tag[1:]

        print "<img src='%s?%s'>" % (self.myurl, '&'.join(self.passthru))

        link = abs_rel_link(self.myurl, self.passthru+['table'])
        print "<p><p> <h4> Redraw this with %s performance? </h4>" % link

        heading = "%s, %s metric" % (title, metric)
        if self.relative:
            heading += ", relative"
        print "<p><p> <h3> %s: </h3>" % heading
        table_1_metric_all_kernels(plot_data, columns, column_argname,
                                   self.kernels, self.kernel_dates,
                                   self.myurl, filtered_passthru)
        print "</body></html>"


    def graph_1_test_all_platforms(self, test, metric, platforms, plot_data):
        # generate graph image for one benchmark
        title = test.capitalize()
        if 'regress' in self.toggles:
            title += ' Regressions'
        if 'table' in self.cgiform:
            self.table_for_graph_1_test(title, metric, plot_data,
                                        'platforms', platforms,
                                        filtered_passthru=self.passthru)
        else:
            graph_1_test(title, metric, plot_data, 'platforms', platforms,
                         self.kernel_legend, self.relative,
                         self.size, 'dark' in self.toggles)


    def testrun_details(self, title, runs, metric):
        html_top()
        print '<h3> %s </h3>' % title
        print ('%s, machine group %s on //%s server' %
               (self.kernel_legend, self.test_group, self.tko_server))
        if self.test_tag:
            print '<br> %s test script series' % self.test_tag[1:]
        print '<p>'
        table_testrun_details(runs, metric,
                              self.tko_server, 'attrs' in self.cgiform)
        print "</body></html>"


    def testrun_details_for_1_test_kernel_platform(self, test,
                                                   metric, platform):
        default_kernel = min(self.test_runs.keys())
        kernel = self.cgiform.getvalue('kernel', default_kernel)
        title = '%s on %s using %s' % (test.capitalize(), platform, kernel)
        runs = self.test_runs[kernel].get(platform, [])
        self.testrun_details(title, runs, metric)


    def analyze_1_metric_all_platforms(self, test, metric):
        if 'regress' in self.toggles:
            regressed_platforms = find_regressions(self.kernels, self.test_runs,
                                                   metric)
        else:
            regressed_platforms = None
        plot_data = collect_scaled_scores(metric, self.test_runs,
                                          regressed_platforms, self.relative)
        platforms = sorted(plot_data.keys())
        if not plot_data:
            html_top()
            print 'No runs'
        elif 'runs' in self.cgiform:
            self.testrun_details_for_1_test_kernel_platform(test, metric,
                                                            platforms[0])
        else:
            self.graph_1_test_all_platforms(test, metric, platforms, plot_data)


    def analyze_all_metrics_1_platform(self, test, platform):
        # TODO: show #runs in header
        html_top()
        heading = "%s %s:&nbsp; %s" % (self.test_group, self.kernel_legend,
                                       test.capitalize())
        print "<h2> %s </h2>" % heading
        print "platform=%s <br>" % platform
        for attr in self.test_attributes:
            print "%s=%s &nbsp; " % (attr, self.test_attributes[attr])
        print "<p>"
        table_all_metrics_1_platform(self.test_runs, platform, self.relative)
        print "</body></html>"


    def table_for_variants_all_tests(self, title, plot_data, colkeys, columns,
                                       filtered_passthru, test_tag):
        # generate detailed html page with 1 graph and corresponding numbers
        #   for primary metric over all benchmarks (rows),
        #   on one platform and one kernel,
        #   over various combos of test run attribute constraints (cols).
        html_top()
        print '<h3> %s </h3>' % title
        print ('%s, machine group %s on //%s server <br>' %
               (self.kernel_legend, self.test_group, self.tko_server))
        if test_tag:
            print '%s test script series <br>' % test_tag[1:]

        varies = ['vary='+colkeys[col] for col in columns]
        print "<img src='%s?%s'>" % (self.myurl, '&'.join(self.passthru+varies))

        print "<p><p> <h3> %s: </h3>" % title
        table_variants_all_tests(plot_data, columns, colkeys, self.benchmarks,
                                 self.myurl, filtered_passthru)
        print "</body></html>"


    def analyze_variants_all_tests_1_platform(self, platform, vary):
        # generate one graph image for results of all benchmarks
        # on one platform and one kernel, comparing effects of
        # two or more combos of kernel options (test run attributes)
        #   (numa_fake,stale_page,kswapd_merge,sched_idle, etc)
        kernel = self.cgiform.getvalue('kernel', 'some_kernel')
        self.passthru.append('kernel=%s' % kernel)

        # two or more vary_groups, one for each plotted line;
        # each group begins with vary= and ends at the next '&';
        # each group is a comma-separated list of test attribute
        #   key=val pairs,
        #   e.g. vary=keyval1,keyval2&vary=keyval3,keyval4
        vary_groups = [dict(pair.split('=',1) for pair
                            in vary_group.split(','))
                       for vary_group in vary]
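        # e.g. vary=['numa_fake=128M,stale_page=1', 'numa_fake=0'] parses
        # to (hypothetical attribute values):
        #     [{'numa_fake': '128M', 'stale_page': '1'}, {'numa_fake': '0'}]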

        test = self.benchmarks[0]  # pick any test in all jobs
        kernels, test_tag = self.jobs_selector(test, self.job_table,
                                               self.kernel_dates)

        linekeys = {}
        plot_data = {}
        baselines = {}
        for i, vary_group in enumerate(vary_groups):
            group_attributes = self.test_attributes.copy()
            group_attributes.update(vary_group)
            linekey = ','.join('%s=%s' % (attr, vary_group[attr])
                               for attr in vary_group)
            linekeys[i] = linekey
            data = {}
            for benchmark in self.benchmarks:
                metric = perf.benchmark_main_metric(benchmark)
                runs = collect_testruns(self.job_table[kernel],
                                        benchmark+test_tag,
                                        group_attributes,
                                        self.platforms_filter,
                                        'by_hosts' in self.toggles,
                                        self.no_antag)
                vals = []
                for testrunx in runs.get(platform, []):
                    vals += perf.get_metric_at_point([testrunx], metric)
                if vals:
                    if benchmark not in baselines:
                        baselines[benchmark], stddev = plotgraph.avg_dev(vals)
                    vals = [val/baselines[benchmark] for val in vals]
                    data[benchmark] = vals
            plot_data[i] = data

        title  = "%s on %s" % (kernel, platform)
        for attr in self.test_attributes:
            title += ', %s=%s' % (attr, self.test_attributes[attr])
        if 'table' in self.cgiform:
            self.table_for_variants_all_tests(title, plot_data, linekeys,
                               range(len(linekeys)),
                               filtered_passthru=self.passthru,
                               test_tag=test_tag)
        else:
            graph_variants_all_tests(title, plot_data, linekeys,
                                     self.size, 'dark' in self.toggles)


    def graph_twoway_antagonists_1_test_1_platform(
                  self, test, metric, platform, antagonists, twoway_runs):
        # generate graph of one benchmark's performance paired with
        #    various antagonists, with one plotted line per antagonist,
        #    over most kernels (X axis), all on one machine type
        # performance is relative to the no-antag baseline case
        plot_data = collect_twoway_scores(metric, antagonists,
                                          twoway_runs, self.relative)
        title  = "%s vs. an Antagonist on %s:" % (test.capitalize(), platform)
        if 'table' in self.cgiform:
            filtered_passthru = [arg for arg in self.passthru
                                     if not arg.startswith('antag=')]
            self.table_for_graph_1_test(title, metric, plot_data,
                                   'antag', antagonists,
                                   filtered_passthru=filtered_passthru)
        else:
            graph_1_test(title, metric, plot_data, 'antag', antagonists,
                         self.kernel_legend, self.relative,
                         self.size, 'dark' in self.toggles)


    def testrun_details_for_twoway_test(self, test, metric, platform,
                                        antagonist, twoway_runs):
        default_kernel = min(twoway_runs.keys())
        kernel = self.cgiform.getvalue('kernel', default_kernel)
        title = '%s vs. Antagonist %s on %s using %s' % (
                test.capitalize(), antagonist.capitalize(), platform, kernel)
        runs = twoway_runs[kernel].get(antagonist, [])
        self.testrun_details(title, runs, metric)


    def analyze_twoway_antagonists_1_test_1_platform(
                  self, test, metric, platform, antagonists):
        twoway_runs, all_antagonists = divide_twoway_testruns(self.test_runs,
                                                              platform)
        if antagonists == ['*']:
            antagonists = all_antagonists
        if not twoway_runs:
            html_top()
            print 'No runs'
        elif 'runs' in self.cgiform:
            self.testrun_details_for_twoway_test(
                    test, metric, platform, antagonists[0], twoway_runs)
        else:
            self.graph_twoway_antagonists_1_test_1_platform(
                    test, metric, platform, antagonists, twoway_runs)


    def get_twoway_default_platform(self):
        if self.platforms_filter:
            return self.platforms_filter[0]
        test = 'unixbench'
        kernels, test_tag = self.jobs_selector(test, self.job_table,
                                               self.kernel_dates)
        self.collect_all_testruns(kernels, test+test_tag)
        return all_tested_platforms(self.test_runs)[0]


    def overview_page_all_tests(self, benchmarks, antagonists):
        # generate overview html page with small graphs for each benchmark
        #   linking to detailed html page for that benchmark
        #   recursively link to this same cgi to generate each image
        html_top()
        if antagonists is not None:
            heading = ('Twoway Container Isolation using %s on %s' %
                       (self.kernel_legend, self.get_twoway_default_platform()))
        else:
            heading = '%s, %s Benchmarks' % (self.kernel_legend,
                                             self.test_group)
        if 'regress' in self.toggles:
            heading += ", Regressions Only"
        print "<h3> %s </h3>" % heading
        for test in benchmarks:
            overview_thumb(test, '', self.myurl, self.passthru)
            if test == 'unixbench':
                overview_thumb('unixbench', 'Process_creation',
                               self.myurl, self.passthru)

        link = abs_rel_link(self.myurl, self.passthru)
        print "<p><p> <h4> Redraw this with %s performance? </h4>" % link
        print "</body></html>"


    def analyze_1_test(self, test, antagonists):
        self.passthru.append('test=%s' % test)
        metric = self.cgiform.getvalue('metric', '')
        if metric:
            self.passthru.append('metric=%s' % metric)
        else:
            metric = perf.benchmark_main_metric(test)
            assert metric, "no default metric for test %s" % test
        self.kernels, self.test_tag = self.jobs_selector(test, self.job_table,
                                                         self.kernel_dates)
        self.collect_all_testruns(self.kernels, test+self.test_tag)
        if not self.platforms_filter and (metric == '*' or
                                          antagonists is not None):
            # choose default platform
            self.platforms_filter = all_tested_platforms(self.test_runs)[0:1]
            self.passthru.append('platforms=%s' %
                                 ','.join(self.platforms_filter))
        if antagonists is not None:
            antagonists = antagonists.split(',')
            if len(antagonists) == 1 and antagonists != ['*']:
                self.relative = False
            self.analyze_twoway_antagonists_1_test_1_platform(
                    test, metric, self.platforms_filter[0], antagonists)
        elif metric == '*':
            platform = self.platforms_filter[0]
            self.analyze_all_metrics_1_platform(test, platform)
        else:
            self.analyze_1_metric_all_platforms(test, metric)


    def parse_most_cgi_args(self):
        self.myurl = os.path.basename(sys.argv[0])
        self.cgiform = cgi.FieldStorage(keep_blank_values=True)
        self.size = self.cgiform.getvalue('size', '1200,850')
        all_toggles = set(('absolute', 'regress', 'dark', 'by_hosts'))
        self.toggles = set(tog for tog in all_toggles if tog in self.cgiform)
        platforms = self.cgiform.getvalue('platforms', '')
        if '.' in platforms:
            self.toggles.add('by_hosts')
        self.passthru = list(self.toggles)
        self.relative = 'absolute' not in self.toggles
        if platforms:
            self.passthru.append('platforms=%s' % platforms)
            self.platforms_filter = platforms.split(',')
        else:
            self.platforms_filter = []
        self.test_attributes = perf.parse_test_attr_args(self.cgiform)
        perf.append_cgi_args(self.passthru, self.test_attributes)
        test = self.cgiform.getvalue('test', '')
        if 'antag' in self.cgiform:
            antagonists = ','.join(self.cgiform.getlist('antag'))
            #      antag=*
            #   or antag=test1,test2,test3,...
            #   or antag=test1&antag=test2&...
            #   testN is empty for the solo case of no antagonist
            self.passthru.append('antag=%s' % antagonists)
        else:
            antagonists = None  # not the same as ''
        return test, antagonists
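

# Minimal sketch of a calling *.cgi script (hypothetical site values).
# jobs_selector must return (kernels, test_tag) and fill in the job_table
# and kernel_dates dicts passed to it:
#
#     #!/usr/bin/python
#     import common
#     from autotest_lib.tko import nightly
#
#     def jobs_selector(test, job_table, kernel_dates):
#         ...                      # site-specific job selection
#         return kernels, test_tag
#
#     nightly.generate_views(kernel_legend='Nightly kernels',
#                            benchmarks=['kernbench', 'unixbench'],
#                            test_group='Kernel perf',
#                            site_benchmark_metrics={},
#                            tko_server='autotest',
#                            jobs_selector=jobs_selector,
#                            no_antag=False)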
    787