#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Performance runner for d8.

Call e.g. with tools/run-perf.py --arch ia32 some_suite.json

The suite json format is expected to be:
{
  "path": <relative path chunks to perf resources and main file>,
  "name": <optional suite name, file name is default>,
  "archs": [<architecture name for which this suite is run>, ...],
  "binary": <name of binary to run, default "d8">,
  "flags": [<flag to d8>, ...],
  "test_flags": [<flag to the test file>, ...],
  "run_count": <how often will this suite run (optional)>,
  "run_count_XXX": <how often will this suite run for arch XXX (optional)>,
  "resources": [<js file to be moved to android device>, ...],
  "main": <main js perf runner file>,
  "results_regexp": <optional regexp>,
  "results_processor": <optional python results processor script>,
  "units": <the unit specification for the performance dashboard>,
  "tests": [
    {
      "name": <name of the trace>,
      "results_regexp": <optional more specific regexp>,
      "results_processor": <optional python results processor script>,
      "units": <the unit specification for the performance dashboard>,
    }, ...
  ]
}

The tests field can also nest other suites to arbitrary depth. A suite
with a "main" file is a leaf suite that can contain one more level of
tests.

A suite's results_regexp is expected to have one string placeholder
"%s" for the trace name. A trace's results_regexp overrides the suite
default.

A suite's results_processor may point to an optional python script. If
specified, it is called after running the tests like this (with a path
relative to the suite level's path):
<results_processor file> <same flags as for d8> <suite level name> <output>

The <output> is a temporary file containing d8 output. The results_regexp will
be applied to the output of this script.
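
As an illustration, with hypothetical file and suite names, such a call could
take the form: my_processor.py --expose-gc my_suite /tmp/d8-output.txt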

A suite without "tests" is considered a performance test itself.

Full example (suite with one runner):
{
  "path": ["."],
  "flags": ["--expose-gc"],
  "test_flags": ["5"],
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "run_count_ia32": 3,
  "main": "run.js",
  "results_regexp": "^%s: (.+)$",
  "units": "score",
  "tests": [
    {"name": "Richards"},
    {"name": "DeltaBlue"},
    {"name": "NavierStokes",
     "results_regexp": "^NavierStokes: (.+)$"}
  ]
}

Full example (suite with several runners):
{
  "path": ["."],
  "flags": ["--expose-gc"],
  "archs": ["ia32", "x64"],
  "run_count": 5,
  "units": "score",
  "tests": [
    {"name": "Richards",
     "path": ["richards"],
     "main": "run.js",
     "run_count": 3,
     "results_regexp": "^Richards: (.+)$"},
    {"name": "NavierStokes",
     "path": ["navier_stokes"],
     "main": "run.js",
     "results_regexp": "^NavierStokes: (.+)$"}
  ]
}

Path pieces are concatenated. D8 is always run with the suite's path as cwd.

The test flags are passed to the js test file after '--'.
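
For the first full example above, the resulting d8 invocation therefore has
the form (shell path omitted): d8 --expose-gc run.js -- 5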
"""

from collections import OrderedDict
import json
import logging
import math
import optparse
import os
import re
import subprocess
import sys

from testrunner.local import commands
from testrunner.local import utils

ARCH_GUESS = utils.DefaultArch()
SUPPORTED_ARCHS = ["arm",
                   "ia32",
                   "mips",
                   "mipsel",
                   "nacl_ia32",
                   "nacl_x64",
                   "x64",
                   "arm64"]

GENERIC_RESULTS_RE = re.compile(r"^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$")
RESULT_STDDEV_RE = re.compile(r"^\{([^\}]+)\}$")
RESULT_LIST_RE = re.compile(r"^\[([^\]]+)\]$")
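# The regexps above capture generic result lines of the following forms (the
# graph/trace/value strings here are hypothetical illustrations):
#   RESULT Octane: Richards= 1234 score
#   RESULT Octane: Splay= [3999.5,4001.2,3988.9] score
#   RESULT Memory: peak= {2048,16} KB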
TOOLS_BASE = os.path.abspath(os.path.dirname(__file__))


def LoadAndroidBuildTools(path):  # pragma: no cover
  assert os.path.exists(path)
  sys.path.insert(0, path)

  from pylib.device import adb_wrapper  # pylint: disable=F0401
  from pylib.device import device_errors  # pylint: disable=F0401
  from pylib.device import device_utils  # pylint: disable=F0401
  from pylib.perf import cache_control  # pylint: disable=F0401
  from pylib.perf import perf_control  # pylint: disable=F0401
  global adb_wrapper
  global cache_control
  global device_errors
  global device_utils
  global perf_control


def GeometricMean(values):
  """Returns the geometric mean of a list of values.

  The mean is calculated using log to avoid overflow.
  """
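  # Illustration with hypothetical input: GeometricMean([2, 8]) returns "4.0",
  # since exp((log(2) + log(8)) / 2) == 4.0.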
  values = map(float, values)
  return str(math.exp(sum(map(math.log, values)) / len(values)))


class Results(object):
  """Placeholder for result traces."""
  def __init__(self, traces=None, errors=None):
    self.traces = traces or []
    self.errors = errors or []

  def ToDict(self):
    return {"traces": self.traces, "errors": self.errors}

  def WriteToFile(self, file_name):
    with open(file_name, "w") as f:
      f.write(json.dumps(self.ToDict()))

  def __add__(self, other):
    self.traces += other.traces
    self.errors += other.errors
    return self

  def __str__(self):  # pragma: no cover
    return str(self.ToDict())


class Measurement(object):
  """Represents a series of results of one trace.

  The results are from repeated runs of the same executable. They are
  gathered by repeated calls to ConsumeOutput.
  """
  def __init__(self, graphs, units, results_regexp, stddev_regexp):
    self.name = graphs[-1]
    self.graphs = graphs
    self.units = units
    self.results_regexp = results_regexp
    self.stddev_regexp = stddev_regexp
    self.results = []
    self.errors = []
    self.stddev = ""

  def ConsumeOutput(self, stdout):
    try:
      result = re.search(self.results_regexp, stdout, re.M).group(1)
      self.results.append(str(float(result)))
    except ValueError:
      self.errors.append("Regexp \"%s\" returned a non-numeric for test %s."
                         % (self.results_regexp, self.name))
    except:
      self.errors.append("Regexp \"%s\" didn't match for test %s."
                         % (self.results_regexp, self.name))

    try:
      if self.stddev_regexp and self.stddev:
        self.errors.append("Test %s should only run once since a stddev "
                           "is provided by the test." % self.name)
      if self.stddev_regexp:
        self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
    except:
      self.errors.append("Regexp \"%s\" didn't match for test %s."
                         % (self.stddev_regexp, self.name))

  def GetResults(self):
    return Results([{
      "graphs": self.graphs,
      "units": self.units,
      "results": self.results,
      "stddev": self.stddev,
    }], self.errors)


class NullMeasurement(object):
  """Null object to avoid extra logic for configurations that didn't run,
  e.g. the no-patch run when not in trybot mode.
  """
  def ConsumeOutput(self, stdout):
    pass

  def GetResults(self):
    return Results()


def Unzip(iterable):
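  # Splits an iterable of 2-tuples into two lazily produced streams.
  # Illustration with hypothetical data: Unzip([(1, "a"), (2, "b")]) returns
  # two callables yielding iter([1, 2]) and iter(["a", "b"]) respectively.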
  left = []
  right = []
  for l, r in iterable:
    left.append(l)
    right.append(r)
  return lambda: iter(left), lambda: iter(right)


def AccumulateResults(
    graph_names, trace_configs, iter_output, trybot, no_patch, calc_total):
  """Iterates over the output of multiple benchmark reruns and accumulates
  results for a configured list of traces.

  Args:
    graph_names: List of names that configure the base path of the traces. E.g.
                 ['v8', 'Octane'].
    trace_configs: List of "TraceConfig" instances. Each trace config defines
                   how to perform a measurement.
    iter_output: Iterator over the standard output of each test run.
    trybot: Indicates that this is run in trybot mode, i.e. run twice, once
            with and once without patch.
    no_patch: Indicates whether this is a trybot run without patch.
    calc_total: Boolean flag to specify the calculation of a summary trace.
  Returns: A "Results" object.
  """
  measurements = [
    trace.CreateMeasurement(trybot, no_patch) for trace in trace_configs]
  for stdout in iter_output():
    for measurement in measurements:
      measurement.ConsumeOutput(stdout)

  res = reduce(lambda r, m: r + m.GetResults(), measurements, Results())

  if not res.traces or not calc_total:
    return res

  # Assume all traces have the same structure.
  if len(set(map(lambda t: len(t["results"]), res.traces))) != 1:
    res.errors.append("Not all traces have the same number of results.")
    return res

  # Calculate the geometric means for all traces. Above we made sure that
  # there is at least one trace and that the number of results is the same
  # for each trace.
  n_results = len(res.traces[0]["results"])
  total_results = [GeometricMean(t["results"][i] for t in res.traces)
                   for i in range(0, n_results)]
  res.traces.append({
    "graphs": graph_names + ["Total"],
    "units": res.traces[0]["units"],
    "results": total_results,
    "stddev": "",
  })
  return res


def AccumulateGenericResults(graph_names, suite_units, iter_output):
  """Iterates over the output of multiple benchmark reruns and accumulates
  generic results.

  Args:
    graph_names: List of names that configure the base path of the traces. E.g.
                 ['v8', 'Octane'].
    suite_units: Measurement default units as defined by the benchmark suite.
    iter_output: Iterator over the standard output of each test run.
  Returns: A "Results" object.
  """
  traces = OrderedDict()
  for stdout in iter_output():
    if stdout is None:
      # The None value is used as a null object to simplify logic.
      continue
    for line in stdout.strip().splitlines():
      match = GENERIC_RESULTS_RE.match(line)
      if match:
        stddev = ""
        graph = match.group(1)
        trace = match.group(2)
        body = match.group(3)
        units = match.group(4)
        match_stddev = RESULT_STDDEV_RE.match(body)
        match_list = RESULT_LIST_RE.match(body)
        errors = []
        if match_stddev:
          result, stddev = map(str.strip, match_stddev.group(1).split(","))
          results = [result]
        elif match_list:
          results = map(str.strip, match_list.group(1).split(","))
        else:
          results = [body.strip()]

        try:
          results = map(lambda r: str(float(r)), results)
        except ValueError:
          results = []
          errors = ["Found non-numeric in %s" %
                    "/".join(graph_names + [graph, trace])]

        trace_result = traces.setdefault(trace, Results([{
          "graphs": graph_names + [graph, trace],
          "units": (units or suite_units).strip(),
          "results": [],
          "stddev": "",
        }], errors))
        trace_result.traces[0]["results"].extend(results)
        trace_result.traces[0]["stddev"] = stddev

  return reduce(lambda r, t: r + t, traces.itervalues(), Results())


class Node(object):
  """Represents a node in the suite tree structure."""
  def __init__(self, *args):
    self._children = []

  def AppendChild(self, child):
    self._children.append(child)


class DefaultSentinel(Node):
  """Fake parent node with all default values."""
  def __init__(self, binary = "d8"):
    super(DefaultSentinel, self).__init__()
    self.binary = binary
    self.run_count = 10
    self.timeout = 60
    self.path = []
    self.graphs = []
    self.flags = []
    self.test_flags = []
    self.resources = []
    self.results_regexp = None
    self.stddev_regexp = None
    self.units = "score"
    self.total = False


class GraphConfig(Node):
  """Represents a suite definition.

  Can either be a leaf or an inner node that provides default values.
  """
  def __init__(self, suite, parent, arch):
    super(GraphConfig, self).__init__()
    self._suite = suite

    assert isinstance(suite.get("path", []), list)
    assert isinstance(suite["name"], basestring)
    assert isinstance(suite.get("flags", []), list)
    assert isinstance(suite.get("test_flags", []), list)
    assert isinstance(suite.get("resources", []), list)

    # Accumulated values.
    self.path = parent.path[:] + suite.get("path", [])
    self.graphs = parent.graphs[:] + [suite["name"]]
    self.flags = parent.flags[:] + suite.get("flags", [])
    self.test_flags = parent.test_flags[:] + suite.get("test_flags", [])

    # Values independent of parent node.
    self.resources = suite.get("resources", [])

    # Discrete values (with parent defaults).
    self.binary = suite.get("binary", parent.binary)
    self.run_count = suite.get("run_count", parent.run_count)
    self.run_count = suite.get("run_count_%s" % arch, self.run_count)
    self.timeout = suite.get("timeout", parent.timeout)
    self.timeout = suite.get("timeout_%s" % arch, self.timeout)
    self.units = suite.get("units", parent.units)
    self.total = suite.get("total", parent.total)

    # A regular expression for results. If the parent graph provides a
    # regexp and the current suite has none, a string placeholder for the
    # suite name is expected.
    # TODO(machenbach): Currently that only makes sense for the leaf level.
    # Multiple placeholders for multiple levels are not supported.
    if parent.results_regexp:
      regexp_default = parent.results_regexp % re.escape(suite["name"])
    else:
      regexp_default = None
    self.results_regexp = suite.get("results_regexp", regexp_default)

    # A similar regular expression for the standard deviation (optional).
    if parent.stddev_regexp:
      stddev_default = parent.stddev_regexp % re.escape(suite["name"])
    else:
      stddev_default = None
    self.stddev_regexp = suite.get("stddev_regexp", stddev_default)


class TraceConfig(GraphConfig):
  """Represents a leaf in the suite tree structure."""
  def __init__(self, suite, parent, arch):
    super(TraceConfig, self).__init__(suite, parent, arch)
    assert self.results_regexp

  def CreateMeasurement(self, trybot, no_patch):
    if not trybot and no_patch:
      # Use null object for no-patch logic if this is not a trybot run.
      return NullMeasurement()

    return Measurement(
        self.graphs,
        self.units,
        self.results_regexp,
        self.stddev_regexp,
    )


class RunnableConfig(GraphConfig):
  """Represents a runnable suite definition (i.e. has a main file).
  """
  @property
  def main(self):
    return self._suite.get("main", "")

  def ChangeCWD(self, suite_path):
    """Changes the cwd to the path defined in the current graph.

    The tests are supposed to be relative to the suite configuration.
    """
    suite_dir = os.path.abspath(os.path.dirname(suite_path))
    bench_dir = os.path.normpath(os.path.join(*self.path))
    os.chdir(os.path.join(suite_dir, bench_dir))

  def GetCommandFlags(self, extra_flags=None):
    suffix = ["--"] + self.test_flags if self.test_flags else []
    return self.flags + (extra_flags or []) + [self.main] + suffix

  def GetCommand(self, shell_dir, extra_flags=None):
    # TODO(machenbach): This requires +.exe if run on windows.
    extra_flags = extra_flags or []
    cmd = [os.path.join(shell_dir, self.binary)]
    if self.binary != 'd8' and '--prof' in extra_flags:
      print "Profiler supported only on a benchmark run with d8"
    return cmd + self.GetCommandFlags(extra_flags=extra_flags)

  def Run(self, runner, trybot):
    """Iterates over several runs and handles the output for all traces."""
    stdout_with_patch, stdout_no_patch = Unzip(runner())
    return (
        AccumulateResults(
            self.graphs,
            self._children,
            iter_output=stdout_with_patch,
            trybot=trybot,
            no_patch=False,
            calc_total=self.total,
        ),
        AccumulateResults(
            self.graphs,
            self._children,
            iter_output=stdout_no_patch,
            trybot=trybot,
            no_patch=True,
            calc_total=self.total,
        ),
    )


class RunnableTraceConfig(TraceConfig, RunnableConfig):
  """Represents a runnable suite definition that is a leaf."""
  def __init__(self, suite, parent, arch):
    super(RunnableTraceConfig, self).__init__(suite, parent, arch)

  def Run(self, runner, trybot):
    """Iterates over several runs and handles the output."""
    measurement_with_patch = self.CreateMeasurement(trybot, False)
    measurement_no_patch = self.CreateMeasurement(trybot, True)
    for stdout_with_patch, stdout_no_patch in runner():
      measurement_with_patch.ConsumeOutput(stdout_with_patch)
      measurement_no_patch.ConsumeOutput(stdout_no_patch)
    return (
        measurement_with_patch.GetResults(),
        measurement_no_patch.GetResults(),
    )


class RunnableGenericConfig(RunnableConfig):
  """Represents a runnable suite definition with generic traces."""
  def __init__(self, suite, parent, arch):
    super(RunnableGenericConfig, self).__init__(suite, parent, arch)

  def Run(self, runner, trybot):
    stdout_with_patch, stdout_no_patch = Unzip(runner())
    return (
        AccumulateGenericResults(self.graphs, self.units, stdout_with_patch),
        AccumulateGenericResults(self.graphs, self.units, stdout_no_patch),
    )


def MakeGraphConfig(suite, arch, parent):
  """Factory method for making graph configuration objects."""
  if isinstance(parent, RunnableConfig):
    # Below a runnable there can only be traces.
    return TraceConfig(suite, parent, arch)
  elif suite.get("main") is not None:
    # A main file makes this graph runnable. Empty strings are accepted.
    if suite.get("tests"):
      # This graph has subgraphs (traces).
      return RunnableConfig(suite, parent, arch)
    else:
      # This graph has no subgraphs, it's a leaf.
      return RunnableTraceConfig(suite, parent, arch)
  elif suite.get("generic"):
    # This is a generic suite definition. It is either a runnable executable
    # or has a main js file.
    return RunnableGenericConfig(suite, parent, arch)
  elif suite.get("tests"):
    # This is neither a leaf nor a runnable.
    return GraphConfig(suite, parent, arch)
  else:  # pragma: no cover
    raise Exception("Invalid suite configuration.")


def BuildGraphConfigs(suite, arch, parent):
  """Builds a tree structure of graph objects that corresponds to the suite
  configuration.
  """

  # TODO(machenbach): Implement notion of cpu type?
  if arch not in suite.get("archs", SUPPORTED_ARCHS):
    return None

  graph = MakeGraphConfig(suite, arch, parent)
  for subsuite in suite.get("tests", []):
    BuildGraphConfigs(subsuite, arch, graph)
  parent.AppendChild(graph)
  return graph


def FlattenRunnables(node, node_cb):
  """Generator that traverses the tree structure and iterates over all
  runnables.
  """
  node_cb(node)
  if isinstance(node, RunnableConfig):
    yield node
  elif isinstance(node, Node):
    for child in node._children:
      for result in FlattenRunnables(child, node_cb):
        yield result
  else:  # pragma: no cover
    raise Exception("Invalid suite configuration.")


class Platform(object):
  def __init__(self, options):
    self.shell_dir = options.shell_dir
    self.shell_dir_no_patch = options.shell_dir_no_patch
    self.extra_flags = options.extra_flags.split()

  @staticmethod
  def GetPlatform(options):
    if options.android_build_tools:
      return AndroidPlatform(options)
    else:
      return DesktopPlatform(options)

  def _Run(self, runnable, count, no_patch=False):
    raise NotImplementedError()  # pragma: no cover

  def Run(self, runnable, count):
    """Execute the benchmark's main file.

    If options.shell_dir_no_patch is specified, the benchmark is run once with
    and once without patch.
    Args:
      runnable: A Runnable benchmark instance.
      count: The number of this (repeated) run.
    Returns: A tuple with the benchmark outputs with and without patch. The
             latter will be None if options.shell_dir_no_patch was not
             specified.
    """
    stdout = self._Run(runnable, count, no_patch=False)
    if self.shell_dir_no_patch:
      return stdout, self._Run(runnable, count, no_patch=True)
    else:
      return stdout, None


class DesktopPlatform(Platform):
  def __init__(self, options):
    super(DesktopPlatform, self).__init__(options)
    self.command_prefix = []

    if options.prioritize or options.affinitize != None:
      self.command_prefix = ["schedtool"]
      if options.prioritize:
        self.command_prefix += ["-n", "-20"]
      if options.affinitize != None:
        # schedtool expects a bit pattern when setting affinity, where each
        # bit set to '1' corresponds to a core on which the process may run.
        # The first bit corresponds to CPU 0. Since the 'affinitize' parameter
        # is a core number, we need to map it to said bit pattern.
        cpu = int(options.affinitize)
        core = 1 << cpu
        self.command_prefix += ["-a", ("0x%x" % core)]
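        # Illustration: --affinitize=3 yields the mask 0x8, i.e. only CPU 3
        # is allowed.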
      self.command_prefix += ["-e"]

  def PreExecution(self):
    pass

  def PostExecution(self):
    pass

  def PreTests(self, node, path):
    if isinstance(node, RunnableConfig):
      node.ChangeCWD(path)

  def _Run(self, runnable, count, no_patch=False):
    suffix = ' - without patch' if no_patch else ''
    shell_dir = self.shell_dir_no_patch if no_patch else self.shell_dir
    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
    command = self.command_prefix + runnable.GetCommand(shell_dir,
                                                        self.extra_flags)
    try:
      output = commands.Execute(
        command,
        timeout=runnable.timeout,
      )
    except OSError as e:  # pragma: no cover
      print title % "OSError"
      print e
      return ""

    print title % "Stdout"
    print output.stdout
    if output.stderr:  # pragma: no cover
      # Print stderr for debugging.
      print title % "Stderr"
      print output.stderr
    if output.timed_out:
      print ">>> Test timed out after %ss." % runnable.timeout
    if '--prof' in self.extra_flags:
      os_prefix = {"linux": "linux", "macos": "mac"}.get(utils.GuessOS())
      if os_prefix:
        tick_tools = os.path.join(TOOLS_BASE, "%s-tick-processor" % os_prefix)
        subprocess.check_call(tick_tools + " --only-summary", shell=True)
      else:  # pragma: no cover
        print "Profiler option currently supported on Linux and Mac OS."
    return output.stdout


class AndroidPlatform(Platform):  # pragma: no cover
  DEVICE_DIR = "/data/local/tmp/v8/"

  def __init__(self, options):
    super(AndroidPlatform, self).__init__(options)
    LoadAndroidBuildTools(options.android_build_tools)

    if not options.device:
      # Detect attached device if not specified.
      devices = adb_wrapper.AdbWrapper.Devices()
      assert devices and len(devices) == 1, (
          "None or multiple devices detected. Please specify the device on "
          "the command-line with --device")
      options.device = str(devices[0])
    self.adb_wrapper = adb_wrapper.AdbWrapper(options.device)
    self.device = device_utils.DeviceUtils(self.adb_wrapper)

  def PreExecution(self):
    perf = perf_control.PerfControl(self.device)
    perf.SetHighPerfMode()

    # Remember what we have already pushed to the device.
    self.pushed = set()

  def PostExecution(self):
    perf = perf_control.PerfControl(self.device)
    perf.SetDefaultPerfMode()
    self.device.RunShellCommand(["rm", "-rf", AndroidPlatform.DEVICE_DIR])

  def _PushFile(self, host_dir, file_name, target_rel=".",
                skip_if_missing=False):
    file_on_host = os.path.join(host_dir, file_name)
    file_on_device_tmp = os.path.join(
        AndroidPlatform.DEVICE_DIR, "_tmp_", file_name)
    file_on_device = os.path.join(
        AndroidPlatform.DEVICE_DIR, target_rel, file_name)
    folder_on_device = os.path.dirname(file_on_device)

    # Only attempt to push files that exist.
    if not os.path.exists(file_on_host):
      if not skip_if_missing:
        logging.critical('Missing file on host: %s' % file_on_host)
      return

    # Only push files not yet pushed in one execution.
    if file_on_host in self.pushed:
      return
    else:
      self.pushed.add(file_on_host)

    # Work-around for "text file busy" errors. Push the files to a temporary
    # location and then copy them with a shell command.
    output = self.adb_wrapper.Push(file_on_host, file_on_device_tmp)
    # Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)".
    # Errors look like this: "failed to copy  ... ".
    if output and not re.search('^[0-9]', output.splitlines()[-1]):
      logging.critical('PUSH FAILED: ' + output)
    self.adb_wrapper.Shell("mkdir -p %s" % folder_on_device)
    self.adb_wrapper.Shell("cp %s %s" % (file_on_device_tmp, file_on_device))

  def _PushExecutable(self, shell_dir, target_dir, binary):
    self._PushFile(shell_dir, binary, target_dir)

    # Push external startup data. Backwards compatible for revisions where
    # these files didn't exist.
    self._PushFile(
        shell_dir,
        "natives_blob.bin",
        target_dir,
        skip_if_missing=True,
    )
    self._PushFile(
        shell_dir,
        "snapshot_blob.bin",
        target_dir,
        skip_if_missing=True,
    )
    self._PushFile(
        shell_dir,
        "snapshot_blob_ignition.bin",
        target_dir,
        skip_if_missing=True,
    )

  def PreTests(self, node, path):
    suite_dir = os.path.abspath(os.path.dirname(path))
    if node.path:
      bench_rel = os.path.normpath(os.path.join(*node.path))
      bench_abs = os.path.join(suite_dir, bench_rel)
    else:
      bench_rel = "."
      bench_abs = suite_dir

    self._PushExecutable(self.shell_dir, "bin", node.binary)
    if self.shell_dir_no_patch:
      self._PushExecutable(
          self.shell_dir_no_patch, "bin_no_patch", node.binary)

    if isinstance(node, RunnableConfig):
      self._PushFile(bench_abs, node.main, bench_rel)
    for resource in node.resources:
      self._PushFile(bench_abs, resource, bench_rel)

  def _Run(self, runnable, count, no_patch=False):
    suffix = ' - without patch' if no_patch else ''
    target_dir = "bin_no_patch" if no_patch else "bin"
    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
    cache = cache_control.CacheControl(self.device)
    cache.DropRamCaches()
    binary_on_device = os.path.join(
        AndroidPlatform.DEVICE_DIR, target_dir, runnable.binary)
    cmd = [binary_on_device] + runnable.GetCommandFlags(self.extra_flags)

    # Relative path to benchmark directory.
    if runnable.path:
      bench_rel = os.path.normpath(os.path.join(*runnable.path))
    else:
      bench_rel = "."

    try:
      output = self.device.RunShellCommand(
          cmd,
          cwd=os.path.join(AndroidPlatform.DEVICE_DIR, bench_rel),
          timeout=runnable.timeout,
          retries=0,
      )
      stdout = "\n".join(output)
      print title % "Stdout"
      print stdout
    except device_errors.CommandTimeoutError:
      print ">>> Test timed out after %ss." % runnable.timeout
      stdout = ""
    return stdout


class CustomMachineConfiguration:
  def __init__(self, disable_aslr = False, governor = None):
    self.aslr_backup = None
    self.governor_backup = None
    self.disable_aslr = disable_aslr
    self.governor = governor

  def __enter__(self):
    if self.disable_aslr:
      self.aslr_backup = CustomMachineConfiguration.GetASLR()
      CustomMachineConfiguration.SetASLR(0)
    if self.governor != None:
      self.governor_backup = CustomMachineConfiguration.GetCPUGovernor()
      CustomMachineConfiguration.SetCPUGovernor(self.governor)
    return self

  def __exit__(self, type, value, traceback):
    if self.aslr_backup != None:
      CustomMachineConfiguration.SetASLR(self.aslr_backup)
    if self.governor_backup != None:
      CustomMachineConfiguration.SetCPUGovernor(self.governor_backup)

  @staticmethod
  def GetASLR():
    try:
      with open("/proc/sys/kernel/randomize_va_space", "r") as f:
        return int(f.readline().strip())
    except Exception as e:
      print "Failed to get current ASLR settings."
      raise e

  @staticmethod
  def SetASLR(value):
    try:
      with open("/proc/sys/kernel/randomize_va_space", "w") as f:
        f.write(str(value))
    except Exception as e:
      print "Failed to update ASLR to %s." % value
      print "Are we running under sudo?"
      raise e

    new_value = CustomMachineConfiguration.GetASLR()
    if value != new_value:
      raise Exception("Present value is %s" % new_value)

  @staticmethod
  def GetCPUCoresRange():
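    # The kernel lists the present CPUs as a range string. Illustration with
    # hypothetical contents: "0-7" maps to range(0, 8), and a single "0" maps
    # to range(0, 1).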
    try:
      with open("/sys/devices/system/cpu/present", "r") as f:
        indexes = f.readline()
        r = map(int, indexes.split("-"))
        if len(r) == 1:
          return range(r[0], r[0] + 1)
        return range(r[0], r[1] + 1)
    except Exception as e:
      print "Failed to retrieve number of CPUs."
      raise e

  @staticmethod
  def GetCPUPathForId(cpu_index):
    ret = "/sys/devices/system/cpu/cpu"
    ret += str(cpu_index)
    ret += "/cpufreq/scaling_governor"
    return ret

  @staticmethod
  def GetCPUGovernor():
    try:
      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
      ret = None
      for cpu_index in cpu_indices:
        cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
        with open(cpu_device, "r") as f:
          # We assume the governors of all CPUs are set to the same value.
          val = f.readline().strip()
          if ret == None:
            ret = val
          elif ret != val:
            raise Exception("CPU cores have differing governor settings")
      return ret
    except Exception as e:
      print "Failed to get the current CPU governor."
      print "Is the CPU governor disabled? Check BIOS."
      raise e

  @staticmethod
  def SetCPUGovernor(value):
    try:
      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
      for cpu_index in cpu_indices:
        cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
        with open(cpu_device, "w") as f:
          f.write(value)

    except Exception as e:
      print "Failed to change CPU governor to %s." % value
      print "Are we running under sudo?"
      raise e

    cur_value = CustomMachineConfiguration.GetCPUGovernor()
    if cur_value != value:
      raise Exception("Could not set CPU governor. Present value is %s"
                      % cur_value)


# TODO: Implement results_processor.
def Main(args):
  logging.getLogger().setLevel(logging.INFO)
  parser = optparse.OptionParser()
  parser.add_option("--android-build-tools",
                    help="Path to chromium's build/android. Specifying this "
                         "option will run tests using android platform.")
  parser.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect"),
                    default="x64")
  parser.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots",
                    default=False, action="store_true")
  parser.add_option("--device",
                    help="The device ID to run Android tests on. If not given "
                         "it will be autodetected.")
  parser.add_option("--extra-flags",
                    help="Additional flags to pass to the test executable",
                    default="")
  parser.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
  parser.add_option("--json-test-results-no-patch",
                    help="Path to a file for storing json results from run "
                         "without patch.")
  parser.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  parser.add_option("--outdir-no-patch",
                    help="Base directory with compile output without patch")
  parser.add_option("--binary-override-path",
                    help="JavaScript engine binary. By default, d8 under "
                    "architecture-specific build dir. "
                    "Not supported in conjunction with outdir-no-patch.")
  parser.add_option("--prioritize",
                    help="Raise the priority to nice -20 for the benchmarking "
                    "process. Requires Linux, schedtool, and sudo privileges.",
                    default=False, action="store_true")
  parser.add_option("--affinitize",
                    help="Run benchmarking process on the specified core. "
                    "For example: "
                    "--affinitize=0 will run the benchmark process on core 0. "
                    "--affinitize=3 will run the benchmark process on core 3. "
                    "Requires Linux, schedtool, and sudo privileges.",
                    default=None)
  parser.add_option("--noaslr",
                    help="Disable ASLR for the duration of the benchmarked "
                    "process. Requires Linux and sudo privileges.",
                    default=False, action="store_true")
  parser.add_option("--cpu-governor",
                    help="Set cpu governor to specified policy for the "
                    "duration of the benchmarked process. Typical options: "
                    "'powersave' for more stable results, or 'performance' "
                    "for shorter completion time of suite, with potentially "
                    "more noise in results.")

  (options, args) = parser.parse_args(args)

  if len(args) == 0:  # pragma: no cover
    parser.print_help()
    return 1

  if options.arch in ["auto", "native"]:  # pragma: no cover
    options.arch = ARCH_GUESS

  if options.arch not in SUPPORTED_ARCHS:  # pragma: no cover
    print "Unknown architecture %s" % options.arch
    return 1

  if options.device and not options.android_build_tools:  # pragma: no cover
    print "Specifying a device requires Android build tools."
    return 1

  if (options.json_test_results_no_patch and
      not options.outdir_no_patch):  # pragma: no cover
    print("For writing json test results without patch, an outdir without "
          "patch must be specified.")
    return 1

  workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))

  if options.buildbot:
    build_config = "Release"
  else:
    build_config = "%s.release" % options.arch

  if options.binary_override_path is None:
    options.shell_dir = os.path.join(workspace, options.outdir, build_config)
    default_binary_name = "d8"
  else:
    if not os.path.isfile(options.binary_override_path):
      print "binary-override-path must be a file name"
      return 1
    if options.outdir_no_patch:
      print "specify either binary-override-path or outdir-no-patch"
      return 1
    options.shell_dir = os.path.dirname(options.binary_override_path)
    default_binary_name = os.path.basename(options.binary_override_path)

  if options.outdir_no_patch:
    options.shell_dir_no_patch = os.path.join(
        workspace, options.outdir_no_patch, build_config)
  else:
    options.shell_dir_no_patch = None

  prev_aslr = None
  prev_cpu_gov = None
  platform = Platform.GetPlatform(options)

  results = Results()
  results_no_patch = Results()
  with CustomMachineConfiguration(governor = options.cpu_governor,
                                  disable_aslr = options.noaslr) as conf:
    for path in args:
      path = os.path.abspath(path)

      if not os.path.exists(path):  # pragma: no cover
        results.errors.append("Configuration file %s does not exist." % path)
        continue

      with open(path) as f:
        suite = json.loads(f.read())

      # If no name is given, default to the file name without .json.
      suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])

      # Set up things common to one test suite.
      platform.PreExecution()

      # Build the graph/trace tree structure.
      default_parent = DefaultSentinel(default_binary_name)
      root = BuildGraphConfigs(suite, options.arch, default_parent)

      # Callback to be called on each node on traversal.
      def NodeCB(node):
        platform.PreTests(node, path)

      # Traverse graph/trace tree and iterate over all runnables.
      for runnable in FlattenRunnables(root, NodeCB):
        print ">>> Running suite: %s" % "/".join(runnable.graphs)

        def Runner():
          """Output generator that reruns several times."""
          for i in xrange(0, max(1, runnable.run_count)):
            # TODO(machenbach): Allow timeout per arch like with run_count per
            # arch.
            yield platform.Run(runnable, i)

        # Let the runnable iterate over all runs and handle the output.
        result, result_no_patch = runnable.Run(
          Runner, trybot=options.shell_dir_no_patch)
        results += result
        results_no_patch += result_no_patch
      platform.PostExecution()

    if options.json_test_results:
      results.WriteToFile(options.json_test_results)
    else:  # pragma: no cover
      print results

  if options.json_test_results_no_patch:
    results_no_patch.WriteToFile(options.json_test_results_no_patch)
  else:  # pragma: no cover
    print results_no_patch

  return min(1, len(results.errors))


if __name__ == "__main__":  # pragma: no cover
  sys.exit(Main(sys.argv[1:]))