#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Performance runner for d8.

Call e.g. with tools/run-benchmarks.py --arch ia32 some_suite.json

The suite json format is expected to be:
{
  "path": <relative path chunks to benchmark resources and main file>,
  "name": <optional suite name, file name is default>,
  "archs": [<architecture name for which this suite is run>, ...],
  "binary": <name of binary to run, default "d8">,
  "flags": [<flag to d8>, ...],
  "run_count": <how often this suite will run (optional)>,
  "run_count_XXX": <how often this suite will run for arch XXX (optional)>,
  "resources": [<js file to be loaded before main>, ...],
  "main": <main js benchmark runner file>,
  "results_regexp": <optional regexp>,
  "results_processor": <optional python results processor script>,
  "units": <the unit specification for the performance dashboard>,
  "benchmarks": [
    {
      "name": <name of the benchmark>,
      "results_regexp": <optional more specific regexp>,
      "results_processor": <optional python results processor script>,
      "units": <the unit specification for the performance dashboard>,
    }, ...
  ]
}

The benchmarks field can also nest other suites in arbitrary depth. A suite
with a "main" file is a leaf suite that can contain one more level of
benchmarks.

A suite's results_regexp is expected to have one string placeholder
"%s" for the benchmark name. A benchmark's results_regexp overrides the
suite default.

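For example, the suite-level regexp "^%s: (.+)$" combined with the benchmark
name "Richards" yields the effective regexp "^Richards: (.+)$".
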
A suite's results_processor may point to an optional python script. If
specified, it is called after running the benchmarks like this (with a path
relative to the suite level's path):
<results_processor file> <same flags as for d8> <suite level name> <output>

The <output> is a temporary file containing d8 output. The results_regexp will
be applied to the output of this script.

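A minimal processor could look like this (an illustrative sketch only; real
processors are suite-specific):

#!/usr/bin/env python
import sys

# Invoked as: <processor> <d8 flags...> <suite name> <output file>.
# Read the temporary file with d8 output and print lines in a shape the
# suite's results_regexp can match.
with open(sys.argv[-1]) as f:
  for line in f:
    print line.rstrip()
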
A suite without "benchmarks" is considered a benchmark itself.

Full example (suite with one runner):
{
  "path": ["."],
  "flags": ["--expose-gc"],
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "run_count_ia32": 3,
  "main": "run.js",
  "results_regexp": "^%s: (.+)$",
  "units": "score",
  "benchmarks": [
    {"name": "Richards"},
    {"name": "DeltaBlue"},
    {"name": "NavierStokes",
     "results_regexp": "^NavierStokes: (.+)$"}
  ]
}

Full example (suite with several runners):
{
  "path": ["."],
  "flags": ["--expose-gc"],
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "units": "score",
  "benchmarks": [
    {"name": "Richards",
     "path": ["richards"],
     "main": "run.js",
     "run_count": 3,
     "results_regexp": "^Richards: (.+)$"},
    {"name": "NavierStokes",
     "path": ["navier_stokes"],
     "main": "run.js",
     "results_regexp": "^NavierStokes: (.+)$"}
  ]
}

Path pieces are concatenated. D8 is always run with the suite's path as cwd.
"""

import json
import optparse
import os
import re
import sys

from testrunner.local import commands
from testrunner.local import utils

ARCH_GUESS = utils.DefaultArch()
SUPPORTED_ARCHS = ["android_arm",
                   "android_arm64",
                   "android_ia32",
                   "arm",
                   "ia32",
                   "mips",
                   "mipsel",
                   "nacl_ia32",
                   "nacl_x64",
                   "x64",
                   "arm64"]


class Results(object):
  """Placeholder for result traces."""
  def __init__(self, traces=None, errors=None):
    self.traces = traces or []
    self.errors = errors or []

  def ToDict(self):
    return {"traces": self.traces, "errors": self.errors}

  def WriteToFile(self, file_name):
    with open(file_name, "w") as f:
      f.write(json.dumps(self.ToDict()))

  def __add__(self, other):
    self.traces += other.traces
    self.errors += other.errors
    return self

  def __str__(self):  # pragma: no cover
    return str(self.ToDict())

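# Results objects merge via "+": __add__ concatenates traces and errors in
# place and returns self, so partial results can be accumulated, e.g.
# (illustrative values):
#   total = Results()
#   total += Results(traces=[{"graphs": ["suite", "Richards"],
#                             "units": "score", "results": ["42"]}])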

class Node(object):
  """Represents a node in the benchmark suite tree structure."""
  def __init__(self, *args):
    self._children = []

  def AppendChild(self, child):
    self._children.append(child)


class DefaultSentinel(Node):
  """Fake parent node with all default values."""
  def __init__(self):
    super(DefaultSentinel, self).__init__()
    self.binary = "d8"
    self.run_count = 10
    self.path = []
    self.graphs = []
    self.flags = []
    self.resources = []
    self.results_regexp = None
    self.units = "score"


class Graph(Node):
  """Represents a benchmark suite definition.

  Can either be a leaf or an inner node that provides default values.
  """
  def __init__(self, suite, parent, arch):
    super(Graph, self).__init__()
    self._suite = suite

    assert isinstance(suite.get("path", []), list)
    assert isinstance(suite["name"], basestring)
    assert isinstance(suite.get("flags", []), list)
    assert isinstance(suite.get("resources", []), list)

    # Accumulated values.
    self.path = parent.path[:] + suite.get("path", [])
    self.graphs = parent.graphs[:] + [suite["name"]]
    self.flags = parent.flags[:] + suite.get("flags", [])
    self.resources = parent.resources[:] + suite.get("resources", [])
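    # Illustrative (hypothetical values): a parent path of ["benchmarks"]
    # plus a suite path of ["octane"] accumulates to ["benchmarks", "octane"];
    # graphs, flags and resources concatenate the same way.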

    # Discrete values (with parent defaults).
    self.binary = suite.get("binary", parent.binary)
    self.run_count = suite.get("run_count", parent.run_count)
    self.run_count = suite.get("run_count_%s" % arch, self.run_count)
    self.units = suite.get("units", parent.units)

    # A regular expression for results. If the parent graph provides a
    # regexp and the current suite has none, a string placeholder for the
    # suite name is expected.
    # TODO(machenbach): Currently this only makes sense for the leaf level.
    # Multiple placeholders for multiple levels are not supported.
    if parent.results_regexp:
      regexp_default = parent.results_regexp % suite["name"]
    else:
      regexp_default = None
    self.results_regexp = suite.get("results_regexp", regexp_default)


class Trace(Graph):
  """Represents a leaf in the benchmark suite tree structure.

  Handles collection of measurements.
  """
  def __init__(self, suite, parent, arch):
    super(Trace, self).__init__(suite, parent, arch)
    assert self.results_regexp
    self.results = []
    self.errors = []

  def ConsumeOutput(self, stdout):
    try:
      self.results.append(
          re.search(self.results_regexp, stdout, re.M).group(1))
    except (AttributeError, re.error):
      # Either the regexp didn't match (re.search returned None) or it was
      # invalid.
      self.errors.append("Regexp \"%s\" didn't match for benchmark %s."
                         % (self.results_regexp, self.graphs[-1]))

  def GetResults(self):
    return Results([{
      "graphs": self.graphs,
      "units": self.units,
      "results": self.results,
    }], self.errors)


class Runnable(Graph):
  """Represents a runnable benchmark suite definition (i.e. has a main file).
  """
  @property
  def main(self):
    return self._suite["main"]

  def ChangeCWD(self, suite_path):
    """Changes the cwd to the path defined in the current graph.

    The benchmarks are supposed to be relative to the suite configuration.
    """
    suite_dir = os.path.abspath(os.path.dirname(suite_path))
    bench_dir = os.path.normpath(os.path.join(*self.path))
    os.chdir(os.path.join(suite_dir, bench_dir))

  def GetCommand(self, shell_dir):
    # TODO(machenbach): This requires +.exe if run on windows.
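    # Illustrative shape of the assembled command (hypothetical values):
    #   ["out/x64.release/d8", "--expose-gc", "base.js", "run.js"]
    # i.e. binary, then flags, then resources, then the main file.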
    return (
      [os.path.join(shell_dir, self.binary)] +
      self.flags +
      self.resources +
      [self.main]
    )

  def Run(self, runner):
    """Iterates over several runs and handles the output for all traces."""
    for stdout in runner():
      for trace in self._children:
        trace.ConsumeOutput(stdout)
    return reduce(lambda r, t: r + t.GetResults(), self._children, Results())


class RunnableTrace(Trace, Runnable):
  """Represents a runnable benchmark suite definition that is a leaf."""
  def __init__(self, suite, parent, arch):
    super(RunnableTrace, self).__init__(suite, parent, arch)

  def Run(self, runner):
    """Iterates over several runs and handles the output."""
    for stdout in runner():
      self.ConsumeOutput(stdout)
    return self.GetResults()


def MakeGraph(suite, arch, parent):
  """Factory method for making graph objects."""
  if isinstance(parent, Runnable):
    # Below a runnable, there can only be traces.
    return Trace(suite, parent, arch)
  elif suite.get("main"):
    # A main file makes this graph runnable.
    if suite.get("benchmarks"):
      # This graph has subbenchmarks (traces).
      return Runnable(suite, parent, arch)
    else:
      # This graph has no subbenchmarks, it's a leaf.
      return RunnableTrace(suite, parent, arch)
  elif suite.get("benchmarks"):
    # This is neither a leaf nor a runnable.
    return Graph(suite, parent, arch)
  else:  # pragma: no cover
    raise Exception("Invalid benchmark suite configuration.")


def BuildGraphs(suite, arch, parent=None):
  """Builds a tree structure of graph objects that corresponds to the suite
  configuration.
  """
  parent = parent or DefaultSentinel()

  # TODO(machenbach): Implement notion of cpu type?
  if arch not in suite.get("archs", ["ia32", "x64"]):
    return None

  graph = MakeGraph(suite, arch, parent)
  for subsuite in suite.get("benchmarks", []):
    BuildGraphs(subsuite, arch, graph)
  parent.AppendChild(graph)
  return graph


def FlattenRunnables(node):
  """Generator that traverses the tree structure and iterates over all
  runnables.
  """
  if isinstance(node, Runnable):
    yield node
  elif isinstance(node, Node):
    for child in node._children:
      for result in FlattenRunnables(child):
        yield result
  else:  # pragma: no cover
    raise Exception("Invalid benchmark suite configuration.")


# TODO: Implement results_processor.
def Main(args):
  parser = optparse.OptionParser()
  parser.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect"),
                    default="x64")
  parser.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots",
                    default=False, action="store_true")
  parser.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
  parser.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  (options, args) = parser.parse_args(args)

  if len(args) == 0:  # pragma: no cover
    parser.print_help()
    return 1

  if options.arch in ["auto", "native"]:  # pragma: no cover
    options.arch = ARCH_GUESS

  if options.arch not in SUPPORTED_ARCHS:  # pragma: no cover
    print "Unknown architecture %s" % options.arch
    return 1

  workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

  if options.buildbot:
    shell_dir = os.path.join(workspace, options.outdir, "Release")
  else:
    shell_dir = os.path.join(workspace, options.outdir,
                             "%s.release" % options.arch)
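  # E.g. <workspace>/out/x64.release with the default options (illustrative).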

  results = Results()
  for path in args:
    path = os.path.abspath(path)

    if not os.path.exists(path):  # pragma: no cover
      results.errors.append("Benchmark file %s does not exist." % path)
      continue

    with open(path) as f:
      suite = json.loads(f.read())

    # If no name is given, default to the file name without .json.
    suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])

    for runnable in FlattenRunnables(BuildGraphs(suite, options.arch)):
      print ">>> Running suite: %s" % "/".join(runnable.graphs)
      runnable.ChangeCWD(path)

      def Runner():
        """Output generator that reruns several times."""
        for i in xrange(0, max(1, runnable.run_count)):
          # TODO(machenbach): Make timeout configurable in the suite definition.
          # Allow timeout per arch like with run_count per arch.
          output = commands.Execute(runnable.GetCommand(shell_dir), timeout=60)
          print ">>> Stdout (#%d):" % (i + 1)
          print output.stdout
          if output.stderr:  # pragma: no cover
            # Print stderr for debugging.
            print ">>> Stderr (#%d):" % (i + 1)
            print output.stderr
          yield output.stdout

      # Let runnable iterate over all runs and handle output.
      results += runnable.Run(Runner)

  if options.json_test_results:
    results.WriteToFile(options.json_test_results)
  else:  # pragma: no cover
    print results

  return min(1, len(results.errors))

if __name__ == "__main__":  # pragma: no cover
  sys.exit(Main(sys.argv[1:]))