# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Parses the command line, discovers the appropriate benchmarks, and runs them.

Handles benchmark configuration, but all the logic for
actually running the benchmark is in Benchmark and PageRunner."""

import argparse
import json
import logging
import os
import sys

from telemetry import benchmark
from telemetry.core import discover
from telemetry import decorators
from telemetry.internal.browser import browser_finder
from telemetry.internal.browser import browser_options
from telemetry.internal.util import binary_manager
from telemetry.internal.util import command_line
from telemetry.internal.util import ps_util
from telemetry.util import matching
from telemetry.util import bot_utils


# Right now, we only have one of each of our power perf bots. This means that
# all eligible Telemetry benchmarks are run unsharded, which results in very
# long (12h) cycle times. We'd like to reduce the number of tests that we run
# on each bot drastically until we get more of the same hardware to shard tests
# with, but we can't do so until we've verified that the hardware configuration
# is a viable one for Chrome Telemetry tests. This is done by seeing at least
# one all-green test run. As this happens for each bot, we'll add it to this
# whitelist, making it eligible to run only BattOr power tests.
GOOD_POWER_PERF_BOT_WHITELIST = [
  "Mac Power Dual-GPU Perf",
  "Mac Power Low-End Perf"
]


DEFAULT_LOG_FORMAT = (
  '(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d  '
  '%(message)s')

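# A log line produced with DEFAULT_LOG_FORMAT looks roughly like the
# following (timestamp, location and message are illustrative):
#   (INFO) 2016-10-21 14:33:02,217 benchmark_runner.main:370  Starting run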

def _IsBenchmarkEnabled(benchmark_class, possible_browser):
  return (issubclass(benchmark_class, benchmark.Benchmark) and
          decorators.IsBenchmarkEnabled(benchmark_class, possible_browser))


def PrintBenchmarkList(benchmarks, possible_browser, output_pipe=sys.stdout):
  """Prints the benchmarks that are not filtered, in the same order as in
  the |benchmarks| list.

  Args:
    benchmarks: the list of benchmarks to be printed (in the same order as
      the list).
    possible_browser: the possible_browser instance used to check which
      benchmarks are enabled.
    output_pipe: the stream to which benchmarks are printed.
  """
  if not benchmarks:
    print >> output_pipe, 'No benchmarks found!'
    return

  bad_benchmark = next(
    (b for b in benchmarks if not issubclass(b, benchmark.Benchmark)), None)
  assert bad_benchmark is None, (
    '|benchmarks| param contains non benchmark class: %s' % bad_benchmark)

  # Align the benchmark names to the longest one.
  format_string = '  %%-%ds %%s' % max(len(b.Name()) for b in benchmarks)
  disabled_benchmarks = []

  print >> output_pipe, 'Available benchmarks %sare:' % (
      'for %s ' % possible_browser.browser_type if possible_browser else '')

  # Sort the benchmarks by benchmark name.
  benchmarks = sorted(benchmarks, key=lambda b: b.Name())
  for b in benchmarks:
    if not possible_browser or _IsBenchmarkEnabled(b, possible_browser):
      print >> output_pipe, format_string % (b.Name(), b.Description())
    else:
      disabled_benchmarks.append(b)

  if disabled_benchmarks:
    print >> output_pipe, (
        '\nDisabled benchmarks for %s are (force run with -d):' %
        possible_browser.browser_type)
    for b in disabled_benchmarks:
      print >> output_pipe, format_string % (b.Name(), b.Description())
  print >> output_pipe, (
      'Pass --browser to list benchmarks for another browser.\n')

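# Example (illustrative): given benchmark classes named 'dummy.a' and
# 'dummy.b', PrintBenchmarkList(benchmarks, None) writes:
#   Available benchmarks are:
#     dummy.a <description of dummy.a>
#     dummy.b <description of dummy.b>
#   Pass --browser to list benchmarks for another browser.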

class Help(command_line.OptparseCommand):
  """Display help information about a command"""

  usage = '[command]'

  def __init__(self, commands):
    self._all_commands = commands

  def Run(self, args):
    if len(args.positional_args) == 1:
      commands = _MatchingCommands(args.positional_args[0], self._all_commands)
      if len(commands) == 1:
        command = commands[0]
        parser = command.CreateParser()
        command.AddCommandLineArgs(parser, None)
        parser.print_help()
        return 0

    print >> sys.stderr, ('usage: %s [command] [<options>]' % _ScriptName())
    print >> sys.stderr, 'Available commands are:'
    for command in self._all_commands:
      print >> sys.stderr, '  %-10s %s' % (
          command.Name(), command.Description())
    print >> sys.stderr, ('"%s help <command>" to see usage information '
                          'for a specific command.' % _ScriptName())
    return 0


class List(command_line.OptparseCommand):
  """Lists the available benchmarks"""

  usage = '[benchmark_name] [<options>]'

  @classmethod
  def CreateParser(cls):
    options = browser_options.BrowserFinderOptions()
    parser = options.CreateParser('%%prog %s %s' % (cls.Name(), cls.usage))
    return parser

  @classmethod
  def AddCommandLineArgs(cls, parser, _):
    parser.add_option('-j', '--json-output-file', type='string')
    parser.add_option('-n', '--num-shards', type='int', default=1)

  @classmethod
  def ProcessCommandLineArgs(cls, parser, args, environment):
    if not args.positional_args:
      args.benchmarks = _Benchmarks(environment)
    elif len(args.positional_args) == 1:
      args.benchmarks = _MatchBenchmarkName(args.positional_args[0],
                                            environment, exact_matches=False)
    else:
      parser.error('Must provide at most one benchmark name.')

  def Run(self, args):
    # Set at least info-level logging for the List command.
    # TODO(nedn): remove this once crbug.com/656224 is resolved. The recipe
    # should be changed to use verbose logging instead.
    logging.getLogger().setLevel(logging.INFO)
    possible_browser = browser_finder.FindBrowser(args)
    if args.browser_type in (
        'release', 'release_x64', 'debug', 'debug_x64', 'canary',
        'android-chromium', 'android-chrome'):
      args.browser_type = 'reference'
      possible_reference_browser = browser_finder.FindBrowser(args)
    else:
      possible_reference_browser = None
    if args.json_output_file:
      with open(args.json_output_file, 'w') as f:
        f.write(_GetJsonBenchmarkList(possible_browser,
                                      possible_reference_browser,
                                      args.benchmarks, args.num_shards))
    else:
      PrintBenchmarkList(args.benchmarks, possible_browser)
    return 0

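# Example invocation of the 'list' command (script name and paths are
# illustrative):
#   ./run_benchmark list --browser=release -n 4 -j /tmp/benchmarks.json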

class Run(command_line.OptparseCommand):
  """Run one or more benchmarks (default)"""

  usage = 'benchmark_name [page_set] [<options>]'

  @classmethod
  def CreateParser(cls):
    options = browser_options.BrowserFinderOptions()
    parser = options.CreateParser('%%prog %s %s' % (cls.Name(), cls.usage))
    return parser

  @classmethod
  def AddCommandLineArgs(cls, parser, environment):
    benchmark.AddCommandLineArgs(parser)

    # Allow benchmarks to add their own command line options.
    matching_benchmarks = []
    for arg in sys.argv[1:]:
      matching_benchmarks += _MatchBenchmarkName(arg, environment)

    if matching_benchmarks:
      # TODO(dtu): After move to argparse, add command-line args for all
      # benchmarks to subparser. Using subparsers will avoid duplicate
      # arguments.
      matching_benchmark = matching_benchmarks.pop()
      matching_benchmark.AddCommandLineArgs(parser)
      # The benchmark's options override the defaults!
      matching_benchmark.SetArgumentDefaults(parser)

  @classmethod
  def ProcessCommandLineArgs(cls, parser, args, environment):
    all_benchmarks = _Benchmarks(environment)
    if not args.positional_args:
      possible_browser = (
          browser_finder.FindBrowser(args) if args.browser_type else None)
      PrintBenchmarkList(all_benchmarks, possible_browser)
      sys.exit(-1)

    input_benchmark_name = args.positional_args[0]
    matching_benchmarks = _MatchBenchmarkName(input_benchmark_name, environment)
    if not matching_benchmarks:
      print >> sys.stderr, 'No benchmark named "%s".' % input_benchmark_name
      print >> sys.stderr
      most_likely_matched_benchmarks = matching.GetMostLikelyMatchedObject(
          all_benchmarks, input_benchmark_name, lambda x: x.Name())
      if most_likely_matched_benchmarks:
        print >> sys.stderr, 'Did you mean any of the benchmarks below?'
        PrintBenchmarkList(most_likely_matched_benchmarks, None, sys.stderr)
      sys.exit(-1)

    if len(matching_benchmarks) > 1:
      print >> sys.stderr, ('Multiple benchmarks named "%s".' %
                            input_benchmark_name)
      print >> sys.stderr, 'Did you mean one of these?'
      print >> sys.stderr
      PrintBenchmarkList(matching_benchmarks, None, sys.stderr)
      sys.exit(-1)

    benchmark_class = matching_benchmarks.pop()
    if len(args.positional_args) > 1:
      parser.error('Too many arguments.')

    assert issubclass(benchmark_class, benchmark.Benchmark), (
        'Trying to run a non-Benchmark?!')

    benchmark.ProcessCommandLineArgs(parser, args)
    benchmark_class.ProcessCommandLineArgs(parser, args)

    cls._benchmark = benchmark_class

  def Run(self, args):
    return min(255, self._benchmark().Run(args))

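# Example invocation of the 'run' command (script and benchmark names are
# illustrative):
#   ./run_benchmark run dummy_benchmark --browser=system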

def _ScriptName():
  return os.path.basename(sys.argv[0])


def _MatchingCommands(string, commands):
  return [command for command in commands
          if command.Name().startswith(string)]


@decorators.Cache
def _Benchmarks(environment):
  benchmarks = []
  for search_dir in environment.benchmark_dirs:
    benchmarks += discover.DiscoverClasses(search_dir,
                                           environment.top_level_dir,
                                           benchmark.Benchmark,
                                           index_by_class_name=True).values()
  return benchmarks


def _MatchBenchmarkName(input_benchmark_name, environment, exact_matches=True):
  def _Matches(input_string, search_string):
    if search_string.startswith(input_string):
      return True
    for part in search_string.split('.'):
      if part.startswith(input_string):
        return True
    return False

  # Exact matching.
  if exact_matches:
    # Don't add aliases to search dict, only allow exact matching for them.
    if input_benchmark_name in environment.benchmark_aliases:
      exact_match = environment.benchmark_aliases[input_benchmark_name]
    else:
      exact_match = input_benchmark_name

    for benchmark_class in _Benchmarks(environment):
      if exact_match == benchmark_class.Name():
        return [benchmark_class]
    return []

  # Fuzzy matching.
  return [benchmark_class for benchmark_class in _Benchmarks(environment)
          if _Matches(input_benchmark_name, benchmark_class.Name())]

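# Example (illustrative): with exact_matches=False, the input 'memory' matches
# both 'memory.top_25' (the full name starts with the input) and
# 'system_health.memory_mobile' (a dot-separated part starts with the input).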

def GetBenchmarkByName(name, environment):
  matched = _MatchBenchmarkName(name, environment, exact_matches=True)
  # With exact_matches, len(matched) is either 0 or 1.
  if len(matched) == 0:
    return None
  return matched[0]


def _GetJsonBenchmarkList(possible_browser, possible_reference_browser,
                          benchmark_classes, num_shards):
  """Returns a JSON string listing all enabled benchmarks, in the format
  expected by buildbots.

  JSON format:
  { "version": <int>,
    "steps": {
      <string>: {
        "device_affinity": <int>,
        "cmd": <string>,
        "perf_dashboard_id": <string>,
      },
      ...
    }
  }
  """
  # TODO(charliea): Remove this once we have more power perf bots.
  only_run_battor_benchmarks = False
  print 'Environment variables: ', os.environ
  if os.environ.get('BUILDBOT_BUILDERNAME') in GOOD_POWER_PERF_BOT_WHITELIST:
    only_run_battor_benchmarks = True

  output = {
    'version': 1,
    'steps': {
    }
  }
  for benchmark_class in benchmark_classes:
    if not _IsBenchmarkEnabled(benchmark_class, possible_browser):
      continue

    base_name = benchmark_class.Name()
    # TODO(charliea): Remove this once we have more power perf bots.
    # Only run BattOr power benchmarks to reduce the cycle time of this bot.
    # TODO(rnephew): Enable media.* and power.* tests when the Mac BattOr issue
    # is solved.
    if only_run_battor_benchmarks and not base_name.startswith('battor'):
      continue
    base_cmd = [sys.executable, os.path.realpath(sys.argv[0]),
                '-v', '--output-format=chartjson', '--upload-results',
                base_name]
    perf_dashboard_id = base_name

    device_affinity = bot_utils.GetDeviceAffinity(num_shards, base_name)

    output['steps'][base_name] = {
      'cmd': ' '.join(base_cmd + [
            '--browser=%s' % possible_browser.browser_type]),
      'device_affinity': device_affinity,
      'perf_dashboard_id': perf_dashboard_id,
    }
    if (possible_reference_browser and
        _IsBenchmarkEnabled(benchmark_class, possible_reference_browser)):
      output['steps'][base_name + '.reference'] = {
        'cmd': ' '.join(base_cmd + [
              '--browser=reference', '--output-trace-tag=_ref']),
        'device_affinity': device_affinity,
        'perf_dashboard_id': perf_dashboard_id,
      }

  return json.dumps(output, indent=2, sort_keys=True)

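# Illustrative output of _GetJsonBenchmarkList for one enabled benchmark named
# 'dummy.bench' (the interpreter/script paths and the affinity value are
# made up):
#   {
#     "steps": {
#       "dummy.bench": {
#         "cmd": "/usr/bin/python /path/to/run_benchmark -v --output-format=chartjson --upload-results dummy.bench --browser=release",
#         "device_affinity": 2,
#         "perf_dashboard_id": "dummy.bench"
#       }
#     },
#     "version": 1
#   }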

def main(environment, extra_commands=None, **log_config_kwargs):
  # The log level is set in browser_options.
  log_config_kwargs.pop('level', None)
  log_config_kwargs.setdefault('format', DEFAULT_LOG_FORMAT)
  logging.basicConfig(**log_config_kwargs)

  ps_util.EnableListingStrayProcessesUponExitHook()

  # Get the command name from the command line.
  if len(sys.argv) > 1 and sys.argv[1] == '--help':
    sys.argv[1] = 'help'

  command_name = 'run'
  for arg in sys.argv[1:]:
    if not arg.startswith('-'):
      command_name = arg
      break

  # TODO(eakuefner): Remove this hack after we port to argparse.
  if command_name == 'help' and len(sys.argv) > 2 and sys.argv[2] == 'run':
    command_name = 'run'
    sys.argv[2] = '--help'

  if extra_commands is None:
    extra_commands = []
  all_commands = [Help, List, Run] + extra_commands

  # Validate and interpret the command name.
  commands = _MatchingCommands(command_name, all_commands)
  if len(commands) > 1:
    print >> sys.stderr, ('"%s" is not a %s command. Did you mean one of these?'
                          % (command_name, _ScriptName()))
    for command in commands:
      print >> sys.stderr, '  %-10s %s' % (
          command.Name(), command.Description())
    return 1
  if commands:
    command = commands[0]
  else:
    command = Run

  binary_manager.InitDependencyManager(environment.client_configs)

  # Parse and run the command.
  parser = command.CreateParser()
  command.AddCommandLineArgs(parser, environment)

  # Set the default chrome root variable.
  parser.set_defaults(chrome_root=environment.default_chrome_root)

  if isinstance(parser, argparse.ArgumentParser):
    commandline_args = sys.argv[1:]
    options, args = parser.parse_known_args(commandline_args[1:])
    command.ProcessCommandLineArgs(parser, options, args, environment)
  else:
    options, args = parser.parse_args()
    if commands:
      args = args[1:]
    options.positional_args = args
    command.ProcessCommandLineArgs(parser, options, environment)

  if command == Help:
    command_instance = command(all_commands)
  else:
    command_instance = command()
  if isinstance(command_instance, command_line.OptparseCommand):
    return command_instance.Run(options)
  else:
    return command_instance.Run(options, args)
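

# A Telemetry embedder typically calls main() from a small wrapper script,
# along these lines (the project_config object is illustrative; it must
# provide the attributes used above, e.g. benchmark_dirs, top_level_dir,
# benchmark_aliases, client_configs and default_chrome_root):
#   from telemetry import benchmark_runner
#   sys.exit(benchmark_runner.main(project_config))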