# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module to generate experiments."""

from __future__ import print_function
import os
import re
import socket

from benchmark import Benchmark
import config
from experiment import Experiment
from label import Label
from label import MockLabel
from results_cache import CacheConditions
import test_flag
import file_lock_machine

# Users may want to run Telemetry tests either individually, or in
# specified sets.  Here we define sets of tests that users may want
# to run together.
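# A benchmark in an experiment file selects one of these sets through the
# special test_name values handled in GetExperiment below (for example,
# test_name: all_toolchain_perf with suite: telemetry_Crosperf).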

telemetry_perfv2_tests = [
    'dromaeo.domcoreattr', 'dromaeo.domcoremodify', 'dromaeo.domcorequery',
    'dromaeo.domcoretraverse', 'kraken', 'octane', 'robohornet_pro', 'sunspider'
]

telemetry_pagecycler_tests = [
    'page_cycler_v2.intl_ar_fa_he',
    'page_cycler_v2.intl_es_fr_pt-BR',
    'page_cycler_v2.intl_hi_ru',
    'page_cycler_v2.intl_ja_zh',
    'page_cycler_v2.intl_ko_th_vi',
    # 'page_cycler_v2.morejs',
    # 'page_cycler_v2.moz',
    # 'page_cycler_v2.netsim.top_10',
    'page_cycler_v2.tough_layout_cases',
    'page_cycler_v2.typical_25'
]

telemetry_toolchain_old_perf_tests = [
    'dromaeo.domcoremodify', 'page_cycler_v2.intl_es_fr_pt-BR',
    'page_cycler_v2.intl_hi_ru', 'page_cycler_v2.intl_ja_zh',
    'page_cycler_v2.intl_ko_th_vi', 'page_cycler_v2.netsim.top_10',
    'page_cycler_v2.typical_25', 'robohornet_pro', 'spaceport',
    'tab_switching.top_10'
]
telemetry_toolchain_perf_tests = [
    'octane',
    'kraken',
    'speedometer',
    'dromaeo.domcoreattr',
    'dromaeo.domcoremodify',
    'smoothness.tough_webgl_cases',
]
graphics_perf_tests = [
    'graphics_GLBench',
    'graphics_GLMark2',
    'graphics_SanAngeles',
    'graphics_WebGLAquarium',
    'graphics_WebGLPerformance',
]
telemetry_crosbolt_perf_tests = [
    'octane',
    'kraken',
    'speedometer',
    'jetstream',
    'startup.cold.blank_page',
    'smoothness.top_25_smooth',
]
crosbolt_perf_tests = [
    'graphics_WebGLAquarium',
    'video_PlaybackPerf.h264',
    'video_PlaybackPerf.vp9',
    'video_WebRtcPerf',
    'BootPerfServerCrosPerf',
    'power_Resume',
    'build_RootFilesystemSize',
    # 'cheets_AntutuTest',
    # 'cheets_PerfBootServer',
    # 'cheets_CandyCrushTest',
    # 'cheets_LinpackTest',
]


class ExperimentFactory(object):
  """Factory class for building an Experiment, given an ExperimentFile as input.

  This factory is currently hardcoded to produce an experiment for running
  ChromeOS benchmarks, but the idea is that in the future, other types
  of experiments could be produced.
  """
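  # A minimal usage sketch (the paths below are hypothetical; the
  # ExperimentFile is parsed elsewhere in crosperf before it reaches this
  # factory):
  #
  #   factory = ExperimentFactory()
  #   experiment = factory.GetExperiment(experiment_file,
  #                                      working_directory='/tmp/crosperf',
  #                                      log_dir='/tmp/crosperf/logs')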

  def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                         iterations, rm_chroot_tmp, perf_args, suite,
                         show_all_results, retries, run_local):
    """Add all the tests in a set to the benchmarks list."""
    for test_name in benchmark_list:
      telemetry_benchmark = Benchmark(
          test_name, test_name, test_args, iterations, rm_chroot_tmp, perf_args,
          suite, show_all_results, retries, run_local)
      benchmarks.append(telemetry_benchmark)

  def GetExperiment(self, experiment_file, working_directory, log_dir):
    """Construct an experiment from an experiment file."""
    global_settings = experiment_file.GetGlobalSettings()
    experiment_name = global_settings.GetField('name')
    board = global_settings.GetField('board')
    remote = global_settings.GetField('remote')
    # Strip any quote characters (" or ') that the user may have added
    # around entries in the remote list.
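    # For example, a remote given as '"chromeos-test-1"' becomes
    # 'chromeos-test-1' (the hostname here is purely illustrative).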
    new_remote = []
    if remote:
      for i in remote:
        c = re.sub('["\']', '', i)
        new_remote.append(c)
    remote = new_remote
    chromeos_root = global_settings.GetField('chromeos_root')
    rm_chroot_tmp = global_settings.GetField('rm_chroot_tmp')
    perf_args = global_settings.GetField('perf_args')
    acquire_timeout = global_settings.GetField('acquire_timeout')
    cache_dir = global_settings.GetField('cache_dir')
    cache_only = global_settings.GetField('cache_only')
    config.AddConfig('no_email', global_settings.GetField('no_email'))
    share_cache = global_settings.GetField('share_cache')
    results_dir = global_settings.GetField('results_dir')
    use_file_locks = global_settings.GetField('use_file_locks')
    locks_dir = global_settings.GetField('locks_dir')
    # If we pass a blank locks_dir to the Experiment, it will use the AFE
    # server lock mechanism.  So if the user specified use_file_locks but did
    # not specify a locks dir, set the locks dir to the default locks dir in
    # file_lock_machine.
    if use_file_locks and not locks_dir:
      locks_dir = file_lock_machine.Machine.LOCKS_DIR
    chrome_src = global_settings.GetField('chrome_src')
    show_all_results = global_settings.GetField('show_all_results')
    log_level = global_settings.GetField('logging_level')
    if log_level not in ('quiet', 'average', 'verbose'):
      log_level = 'verbose'
    # Default cache hit conditions. The image checksum in the cache and the
    # computed checksum of the image must match. Also a cache file must exist.
    cache_conditions = [
        CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH
    ]
    if global_settings.GetField('rerun_if_failed'):
      cache_conditions.append(CacheConditions.RUN_SUCCEEDED)
    if global_settings.GetField('rerun'):
      cache_conditions.append(CacheConditions.FALSE)
    if global_settings.GetField('same_machine'):
      cache_conditions.append(CacheConditions.SAME_MACHINE_MATCH)
    if global_settings.GetField('same_specs'):
      cache_conditions.append(CacheConditions.MACHINES_MATCH)

    # Construct benchmarks.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    benchmarks = []
    all_benchmark_settings = experiment_file.GetSettings('benchmark')
    for benchmark_settings in all_benchmark_settings:
      benchmark_name = benchmark_settings.name
      test_name = benchmark_settings.GetField('test_name')
      if not test_name:
        test_name = benchmark_name
      test_args = benchmark_settings.GetField('test_args')
      iterations = benchmark_settings.GetField('iterations')
      suite = benchmark_settings.GetField('suite')
      retries = benchmark_settings.GetField('retries')
      run_local = benchmark_settings.GetField('run_local')

      if suite == 'telemetry_Crosperf':
        if test_name == 'all_perfv2':
          self.AppendBenchmarkSet(benchmarks, telemetry_perfv2_tests, test_args,
                                  iterations, rm_chroot_tmp, perf_args, suite,
                                  show_all_results, retries, run_local)
        elif test_name == 'all_pagecyclers':
          self.AppendBenchmarkSet(benchmarks, telemetry_pagecycler_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, suite, show_all_results, retries,
                                  run_local)
        elif test_name == 'all_toolchain_perf':
          self.AppendBenchmarkSet(benchmarks, telemetry_toolchain_perf_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, suite, show_all_results, retries,
                                  run_local)
          # Add non-telemetry toolchain-perf benchmarks:
          benchmarks.append(
              Benchmark(
                  'graphics_WebGLAquarium',
                  'graphics_WebGLAquarium',
                  '',
                  iterations,
                  rm_chroot_tmp,
                  perf_args,
                  '',
                  show_all_results,
                  retries,
                  run_local=False))
        elif test_name == 'all_toolchain_perf_old':
          self.AppendBenchmarkSet(benchmarks,
                                  telemetry_toolchain_old_perf_tests, test_args,
                                  iterations, rm_chroot_tmp, perf_args, suite,
                                  show_all_results, retries, run_local)
        else:
          benchmark = Benchmark(test_name, test_name, test_args, iterations,
                                rm_chroot_tmp, perf_args, suite,
                                show_all_results, retries, run_local)
          benchmarks.append(benchmark)
      else:
        if test_name == 'all_graphics_perf':
          self.AppendBenchmarkSet(
              benchmarks,
              graphics_perf_tests,
              '',
              iterations,
              rm_chroot_tmp,
              perf_args,
              '',
              show_all_results,
              retries,
              run_local=False)
        elif test_name == 'all_crosbolt_perf':
          self.AppendBenchmarkSet(benchmarks, telemetry_crosbolt_perf_tests,
                                  test_args, iterations, rm_chroot_tmp,
                                  perf_args, 'telemetry_Crosperf',
                                  show_all_results, retries, run_local)
          self.AppendBenchmarkSet(
              benchmarks,
              crosbolt_perf_tests,
              '',
              iterations,
              rm_chroot_tmp,
              perf_args,
              '',
              show_all_results,
              retries,
              run_local=False)
        else:
          # Add the single benchmark.
          benchmark = Benchmark(
              benchmark_name,
              test_name,
              test_args,
              iterations,
              rm_chroot_tmp,
              perf_args,
              suite,
              show_all_results,
              retries,
              run_local=False)
          benchmarks.append(benchmark)

    if not benchmarks:
      raise RuntimeError('No benchmarks specified')

    # Construct labels.
    # Some fields are common with global settings. The values are
    # inherited and/or merged with the global settings values.
    labels = []
    all_label_settings = experiment_file.GetSettings('label')
    all_remote = list(remote)
    for label_settings in all_label_settings:
      label_name = label_settings.name
      image = label_settings.GetField('chromeos_image')
      autotest_path = label_settings.GetField('autotest_path')
      chromeos_root = label_settings.GetField('chromeos_root')
      my_remote = label_settings.GetField('remote')
      compiler = label_settings.GetField('compiler')
      new_remote = []
      if my_remote:
        for i in my_remote:
          c = re.sub('["\']', '', i)
          new_remote.append(c)
      my_remote = new_remote
      if image == '':
        build = label_settings.GetField('build')
        if len(build) == 0:
          raise RuntimeError("Can not have empty 'build' field!")
        image, autotest_path = label_settings.GetXbuddyPath(
            build, autotest_path, board, chromeos_root, log_level)

      cache_dir = label_settings.GetField('cache_dir')
      chrome_src = label_settings.GetField('chrome_src')

      # TODO(yunlian): We should consolidate the code in machine_manager.py
      # to determine whether we are running from within Google or not.
      if ('corp.google.com' in socket.gethostname() and
          (not my_remote or
           my_remote == remote and global_settings.GetField('board') != board)):
        my_remote = self.GetDefaultRemotes(board)
      if global_settings.GetField('same_machine') and len(my_remote) > 1:
        raise RuntimeError('Only one remote is allowed when same_machine '
                           'is turned on')
      all_remote += my_remote
      image_args = label_settings.GetField('image_args')
      if test_flag.GetTestMode():
        # pylint: disable=too-many-function-args
        label = MockLabel(label_name, image, autotest_path, chromeos_root,
                          board, my_remote, image_args, cache_dir, cache_only,
                          log_level, compiler, chrome_src)
      else:
        label = Label(label_name, image, autotest_path, chromeos_root, board,
                      my_remote, image_args, cache_dir, cache_only, log_level,
                      compiler, chrome_src)
      labels.append(label)

    if not labels:
      raise RuntimeError('No labels specified')

    email = global_settings.GetField('email')
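    # Note: my_remote still holds the remotes of the last label processed
    # above; the set() conversions below simply de-duplicate the final list.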
    all_remote += list(set(my_remote))
    all_remote = list(set(all_remote))
    experiment = Experiment(experiment_name, all_remote, working_directory,
                            chromeos_root, cache_conditions, labels, benchmarks,
                            experiment_file.Canonicalize(), email,
                            acquire_timeout, log_dir, log_level, share_cache,
                            results_dir, locks_dir)

    return experiment

  def GetDefaultRemotes(self, board):
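    """Return the default remotes for |board| from 'default_remotes'."""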
    default_remotes_file = os.path.join(
        os.path.dirname(__file__), 'default_remotes')
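    # Each line of the file is assumed to look like
    #   <board>: <remote1> <remote2> ...
    # (this format is inferred from the parsing below).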
    try:
      with open(default_remotes_file) as f:
        for line in f:
          key, v = line.split(':')
          if key.strip() == board:
            remotes = v.strip().split()
            if remotes:
              return remotes
            else:
              raise RuntimeError('There is no remote for {0}'.format(board))
    except IOError:
      # TODO: Re-raise the original exception instead of raising a new one.
      raise RuntimeError('IOError while reading file {0}'
                         .format(default_remotes_file))
    else:
      raise RuntimeError('There is no remote for {0}'.format(board))
    343