#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Basic pyauto performance tests.

For tests that need to be run for multiple iterations (e.g., so that average
and standard deviation values can be reported), the default number of
iterations run for each of these tests is specified by
|_DEFAULT_NUM_ITERATIONS|.  That value can optionally be tweaked by setting an
environment variable 'NUM_ITERATIONS' to a positive integer, representing the
number of iterations to run.  An additional, initial iteration will also be
run to "warm up" the environment, and the result from that initial iteration
will be ignored.

Some tests rely on repeatedly appending tabs to Chrome.  Occasionally, these
automation calls time out, thereby affecting the timing measurements (see
issue crosbug.com/20503).  To work around this, the tests discard timing
measurements that involve automation timeouts.  The value
|_DEFAULT_MAX_TIMEOUT_COUNT| specifies the threshold number of timeouts that
can be tolerated before the test fails.  To tweak this value, set environment
variable 'MAX_TIMEOUT_COUNT' to the desired threshold value.
"""

import BaseHTTPServer
import commands
import errno
import itertools
import logging
import math
import os
import posixpath
import re
import SimpleHTTPServer
import SocketServer
import signal
import subprocess
import sys
import tempfile
import threading
import time
import timeit
import urllib
import urllib2
import urlparse

import pyauto_functional  # Must be imported before pyauto.
import pyauto
import simplejson  # Must be imported after pyauto; located in third_party.

from netflix import NetflixTestHelper
import pyauto_utils
import test_utils
import webpagereplay
from youtube import YoutubeTestHelper


_CHROME_BASE_DIR = os.path.abspath(os.path.join(
    os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir))


def FormatChromePath(posix_path, **kwargs):
  """Converts a path relative to the Chromium root into an OS-specific path.

  Args:
    posix_path: A path string that may be a format().
      Example: 'src/third_party/{module_name}/__init__.py'
    kwargs: Args for the format replacement.
      Example: {'module_name': 'pylib'}

  Returns:
    An absolute path in the current Chromium tree with formatting applied.
  """
  formatted_path = posix_path.format(**kwargs)
  path_parts = formatted_path.split('/')
  return os.path.join(_CHROME_BASE_DIR, *path_parts)
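# For instance (illustrative only; the file need not exist on disk):
#   FormatChromePath('src/third_party/{module_name}/__init__.py',
#                    module_name='pylib')
# returns an absolute OS-specific path rooted at _CHROME_BASE_DIR.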


def StandardDeviation(values):
  """Returns the sample standard deviation of |values|."""
  avg = Mean(values)
  # Note: check specifically for a None mean; a mean of 0.0 is a valid input
  # to the computation below.
  if len(values) < 2 or avg is None:
    return 0.0
  temp_vals = [math.pow(x - avg, 2) for x in values]
  return math.sqrt(sum(temp_vals) / (len(temp_vals) - 1))


def Mean(values):
  """Returns the arithmetic mean of |values|."""
  if not values or None in values:
    return None
  return sum(values) / float(len(values))


def GeometricMean(values):
  """Returns the geometric mean of |values|."""
  if not values or None in values or any(x < 0.0 for x in values):
    return None
  if 0.0 in values:
    return 0.0
  return math.exp(Mean([math.log(x) for x in values]))
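# Quick sanity checks for the helpers above (values chosen arbitrarily):
#   Mean([2, 4, 6])              -> 4.0
#   StandardDeviation([2, 4, 6]) -> 2.0  (sample std dev, n - 1 denominator)
#   GeometricMean([1, 100])      -> 10.0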


class BasePerfTest(pyauto.PyUITest):
  """Base class for performance tests."""

  _DEFAULT_NUM_ITERATIONS = 10  # Keep synced with desktopui_PyAutoPerfTests.py.
  _DEFAULT_MAX_TIMEOUT_COUNT = 10
  _PERF_OUTPUT_MARKER_PRE = '_PERF_PRE_'
  _PERF_OUTPUT_MARKER_POST = '_PERF_POST_'

  def setUp(self):
    """Performs necessary setup work before running each test."""
    self._num_iterations = self._DEFAULT_NUM_ITERATIONS
    if 'NUM_ITERATIONS' in os.environ:
      self._num_iterations = int(os.environ['NUM_ITERATIONS'])
    self._max_timeout_count = self._DEFAULT_MAX_TIMEOUT_COUNT
    if 'MAX_TIMEOUT_COUNT' in os.environ:
      self._max_timeout_count = int(os.environ['MAX_TIMEOUT_COUNT'])
    self._timeout_count = 0

    # For users who want to see local perf graphs for Chrome when running the
    # tests on their own machines.
    self._local_perf_dir = None
    if 'LOCAL_PERF_DIR' in os.environ:
      self._local_perf_dir = os.environ['LOCAL_PERF_DIR']
      if not os.path.exists(self._local_perf_dir):
        self.fail('LOCAL_PERF_DIR environment variable specified as %s, '
                  'but this directory does not exist.' % self._local_perf_dir)
    # When outputting perf graph information on-the-fly for Chrome, this
    # variable lets us know whether a perf measurement is for a new test
    # execution or for the current test execution.
    self._seen_graph_lines = {}

    pyauto.PyUITest.setUp(self)

    # Flush all buffers to disk and wait until the system calms down.  Must be
    # done *after* calling pyauto.PyUITest.setUp, since that is where Chrome is
    # killed and re-initialized for a new test.
    # TODO(dennisjeffrey): Implement wait for idle CPU on Windows/Mac.
    if self.IsLinux():  # IsLinux() is also True on ChromeOS.
      os.system('sync')
      self._WaitForIdleCPU(60.0, 0.05)

  def _IsPIDRunning(self, pid):
    """Checks whether a given process id is running.

    Args:
      pid: The process id of the process to check.

    Returns:
      True if the process is running, False if not.
    """
    try:
      # Note that this sends the signal 0, which should not interfere with the
      # process.
      os.kill(pid, 0)
    except OSError, err:
      if err.errno == errno.ESRCH:
        return False

    try:
      # A zombie process is dead but not yet reaped; treat it as not running.
      with open('/proc/%s/status' % pid) as proc_file:
        if 'zombie' in proc_file.read():
          return False
    except IOError:
      return False
    return True

  def _GetAllDescendantProcesses(self, pid):
    """Returns the pids of |pid| and all of its descendant processes."""
    pstree_out = subprocess.check_output(['pstree', '-p', '%s' % pid])
    return [int(p) for p in re.findall(r'\((\d+)\)', pstree_out)]

  def _WaitForChromeExit(self, browser_info, timeout):
    """Waits until all Chrome processes have exited, or |timeout| elapses."""
    pid = browser_info['browser_pid']
    chrome_pids = self._GetAllDescendantProcesses(pid)
    initial_time = time.time()
    while time.time() - initial_time < timeout:
      if any(self._IsPIDRunning(p) for p in chrome_pids):
        time.sleep(1)
      else:
        logging.info('_WaitForChromeExit() took: %s seconds',
                     time.time() - initial_time)
        return
    self.fail('_WaitForChromeExit() did not finish within %s seconds' %
              timeout)

  def tearDown(self):
    if self._IsPGOMode():
      browser_info = self.GetBrowserInfo()
      pid = browser_info['browser_pid']
      # session_manager kills chrome without waiting for it to cleanly exit.
      # Until that behavior is changed, we stop it and wait for Chrome to exit
      # cleanly before restarting it.  See: crbug.com/264717
      subprocess.call(['sudo', 'pkill', '-STOP', 'session_manager'])
      os.kill(pid, signal.SIGINT)
      self._WaitForChromeExit(browser_info, 120)
      subprocess.call(['sudo', 'pkill', '-CONT', 'session_manager'])

    pyauto.PyUITest.tearDown(self)

  def _IsPGOMode(self):
    return 'USE_PGO' in os.environ

  def _WaitForIdleCPU(self, timeout, utilization):
    """Waits for the CPU to become idle (< utilization).

    Args:
      timeout: The longest time in seconds to wait before throwing an error.
      utilization: The CPU usage below which the system should be considered
          idle (between 0 and 1.0, independent of cores/hyperthreads).
    """
    time_passed = 0.0
    fraction_non_idle_time = 1.0
    logging.info('Starting to wait up to %fs for idle CPU...', timeout)
    while fraction_non_idle_time >= utilization:
      cpu_usage_start = self._GetCPUUsage()
      time.sleep(2)
      time_passed += 2.0
      cpu_usage_end = self._GetCPUUsage()
      fraction_non_idle_time = self._GetFractionNonIdleCPUTime(
          cpu_usage_start, cpu_usage_end)
      logging.info('Current CPU utilization = %f.', fraction_non_idle_time)
      if time_passed > timeout:
        self._LogProcessActivity()
        message = ('CPU did not idle after %fs wait (utilization = %f).' %
                   (time_passed, fraction_non_idle_time))

        # crosbug.com/37389
        if self._IsPGOMode():
          logging.info(message)
          logging.info('Still continuing because we are in PGO mode.')
          return

        self.fail(message)
    logging.info('Wait for idle CPU took %fs (utilization = %f).',
                 time_passed, fraction_non_idle_time)

  def _LogProcessActivity(self):
    """Logs the output of top on Linux/Mac/CrOS.

    TODO: Use taskmgr or similar on Windows.
    """
    if self.IsLinux() or self.IsMac():  # IsLinux() is also True on ChromeOS.
      logging.info('Logging current process activity using top.')
      cmd = 'top -b -d1 -n1'
      if self.IsMac():
        cmd = 'top -l1'
      p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                           close_fds=True)
      # communicate() avoids any chance of blocking on a full pipe, unlike
      # reading p.stdout directly.
      output = p.communicate()[0]
      logging.info(output)
    else:
      logging.info('Process activity logging not implemented on this OS.')

  def _AppendTab(self, url):
    """Appends a tab and increments a counter if the automation call times out.

    Args:
      url: The string url to which the appended tab should be navigated.
    """
    if not self.AppendTab(pyauto.GURL(url)):
      self._timeout_count += 1

  def _MeasureElapsedTime(self, python_command, num_invocations=1):
    """Measures time (in msec) to execute a python command one or more times.

    Args:
      python_command: A callable.
      num_invocations: An integer number of times to invoke the given command.

    Returns:
      The time required to execute the python command the specified number of
      times, in milliseconds as a float.
    """
    assert callable(python_command)
    def RunCommand():
      for _ in range(num_invocations):
        python_command()
    timer = timeit.Timer(stmt=RunCommand)
    return timer.timeit(number=1) * 1000  # Convert seconds to milliseconds.
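  # Illustrative use of _MeasureElapsedTime (the callable here is deliberately
  # trivial; any zero-argument callable works):
  #   elapsed_ms = self._MeasureElapsedTime(lambda: None, num_invocations=10)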

  def _OutputPerfForStandaloneGraphing(self, graph_name, description, value,
                                       units, units_x, is_stacked):
    """Outputs perf measurement data to a local folder to be graphed.

    This function only applies to Chrome desktop, and assumes that environment
    variable 'LOCAL_PERF_DIR' has been specified and refers to a valid
    directory on the local machine.

    Args:
      graph_name: A string name for the graph associated with this performance
          value.
      description: A string description of the performance value.  Should not
          include spaces.
      value: Either a single numeric value representing a performance
          measurement, or else a list of (x, y) tuples representing one or
          more long-running performance measurements, where 'x' is an x-axis
          value (such as an iteration number) and 'y' is the corresponding
          performance measurement.  If a list of tuples is given, then the
          |units_x| argument must also be specified.
      units: A string representing the units of the performance
          measurement(s).  Should not include spaces.
      units_x: A string representing the units of the x-axis values associated
          with the performance measurements, such as 'iteration' if the x
          values are iteration numbers.  If this argument is specified, then
          the |value| argument must be a list of (x, y) tuples.
      is_stacked: True to draw a "stacked" graph.  First-come values are
          stacked at the bottom by default.
    """
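    # The files written below (formats as implemented in this function; the
    # example names and numbers are illustrative):
    #   graphs.dat: a JSON list of graph descriptors, e.g.
    #       [{"name": "open_tab", "units": "milliseconds", "important": false}]
    #   <graph_name>-summary.dat: one JSON line per revision, newest first,
    #       e.g. {"rev": 3, "traces": {"NewTabPage": ["42.5", "0.0"]}}
    #   last_revision.dat: the integer revision of the newest summary line.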
    revision_num_file = os.path.join(self._local_perf_dir, 'last_revision.dat')
    if os.path.exists(revision_num_file):
      with open(revision_num_file) as f:
        revision = int(f.read())
    else:
      revision = 0

    if not self._seen_graph_lines:
      # We're about to output data for a new test run.
      revision += 1

    # Update graphs.dat.
    existing_graphs = []
    graphs_file = os.path.join(self._local_perf_dir, 'graphs.dat')
    if os.path.exists(graphs_file):
      with open(graphs_file) as f:
        existing_graphs = simplejson.loads(f.read())
    is_new_graph = True
    for graph in existing_graphs:
      if graph['name'] == graph_name:
        is_new_graph = False
        break
    if is_new_graph:
      new_graph = {
        'name': graph_name,
        'units': units,
        'important': False,
      }
      if units_x:
        new_graph['units_x'] = units_x
      existing_graphs.append(new_graph)
      with open(graphs_file, 'w') as f:
        f.write(simplejson.dumps(existing_graphs))
      os.chmod(graphs_file, 0755)

    # Update the data file for this particular graph.
    existing_lines = []
    data_file = os.path.join(self._local_perf_dir, graph_name + '-summary.dat')
    if os.path.exists(data_file):
      with open(data_file) as f:
        existing_lines = f.readlines()
    existing_lines = [simplejson.loads(line.strip())
                      for line in existing_lines]

    seen_key = graph_name
    # We assume that the first line |existing_lines[0]| is the latest.
    if units_x:
      new_line = {
        'rev': revision,
        'traces': { description: [] }
      }
      if seen_key in self._seen_graph_lines:
        # We've added points previously for this graph line in the current
        # test execution, so retrieve the original set of points specified in
        # the most recent revision in the data file.
        new_line = existing_lines[0]
        if description not in new_line['traces']:
          new_line['traces'][description] = []
      for x_value, y_value in value:
        new_line['traces'][description].append([str(x_value), str(y_value)])
    else:
      new_line = {
        'rev': revision,
        'traces': { description: [str(value), str(0.0)] }
      }

    if is_stacked:
      new_line['stack'] = True
      if 'stack_order' not in new_line:
        new_line['stack_order'] = []
      if description not in new_line['stack_order']:
        new_line['stack_order'].append(description)

    if seen_key in self._seen_graph_lines:
      # Update results for the most recent revision.
      existing_lines[0] = new_line
    else:
      # New results for a new revision.
      existing_lines.insert(0, new_line)
      self._seen_graph_lines[seen_key] = True

    existing_lines = [simplejson.dumps(line) for line in existing_lines]
    with open(data_file, 'w') as f:
      f.write('\n'.join(existing_lines))
    os.chmod(data_file, 0755)

    with open(revision_num_file, 'w') as f:
      f.write(str(revision))

  def _OutputPerfGraphValue(self, description, value, units,
                            graph_name, units_x=None, is_stacked=False):
    """Outputs a performance value to have it graphed on the performance bots.

    The output format differs depending on whether the current platform is
    Chrome desktop or ChromeOS.

    For ChromeOS, the performance bots have a 30-character limit on the length
    of the key associated with a performance value.  A key on ChromeOS is
    considered to be of the form "units_description" (for example,
    "milliseconds_NewTabPage"), and is created from the |units| and
    |description| passed as input to this function.  Any characters beyond the
    length-30 limit are truncated before results are stored in the autotest
    database.

    Args:
      description: A string description of the performance value.  Should not
          include spaces.
      value: Either a numeric value representing a performance measurement, or
          a list of values to be averaged.  Lists may also contain (x, y)
          tuples representing one or more performance measurements, where 'x'
          is an x-axis value (such as an iteration number) and 'y' is the
          corresponding performance measurement.  If a list of tuples is
          given, the |units_x| argument must also be specified.
      units: A string representing the units of the performance
          measurement(s).  Should not include spaces.
      graph_name: A string name for the graph associated with this performance
          value.  Only used on Chrome desktop.
      units_x: A string representing the units of the x-axis values associated
          with the performance measurements, such as 'iteration' if the x
          values are iteration numbers.  If this argument is specified, then
          the |value| argument must be a list of (x, y) tuples.
      is_stacked: True to draw a "stacked" graph.  First-come values are
          stacked at the bottom by default.
    """
    if (isinstance(value, list) and value[0] is not None and
        isinstance(value[0], tuple)):
      assert units_x
    if units_x:
      assert isinstance(value, list)

    if self.IsChromeOS():
      # Autotest doesn't support result lists.
      autotest_value = value
      if (isinstance(value, list) and value[0] is not None and
          not isinstance(value[0], tuple)):
        autotest_value = Mean(value)

      if units_x:
        # TODO(dennisjeffrey): Support long-running performance measurements on
        # ChromeOS in a way that can be graphed: crosbug.com/21881.
        pyauto_utils.PrintPerfResult(graph_name, description, autotest_value,
                                     units + ' ' + units_x)
      else:
        # Output short-running performance results in a format understood by
        # autotest.
        perf_key = '%s_%s' % (units, description)
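        # For example, units='milliseconds' and description='NewTabPage'
        # yield the key 'milliseconds_NewTabPage' (23 characters, within the
        # 30-character limit).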
        if len(perf_key) > 30:
          logging.warning('The description "%s" will be truncated to "%s" '
                          '(length 30) when added to the autotest database.',
                          perf_key, perf_key[:30])
        print '\n%s(\'%s\', %f)%s' % (self._PERF_OUTPUT_MARKER_PRE,
                                      perf_key, autotest_value,
                                      self._PERF_OUTPUT_MARKER_POST)

        # Also output results in the format recognized by buildbot, for cases
        # in which these tests are run on ChromeOS through buildbot.  Since
        # buildbot supports result lists, it's ok for |value| to be a list
        # here.
        pyauto_utils.PrintPerfResult(graph_name, description, value, units)

        sys.stdout.flush()
    else:
      # TODO(dmikurube): Support stacked graphs in PrintPerfResult.
      # See http://crbug.com/122119.
      if units_x:
        pyauto_utils.PrintPerfResult(graph_name, description, value,
                                     units + ' ' + units_x)
      else:
        pyauto_utils.PrintPerfResult(graph_name, description, value, units)

      if self._local_perf_dir:
        self._OutputPerfForStandaloneGraphing(
            graph_name, description, value, units, units_x, is_stacked)

  def _OutputEventForStandaloneGraphing(self, description, event_list):
    """Outputs event information to a local folder to be graphed.

    See function _OutputEventGraphValue below for a description of an event.

    This function only applies to Chrome Endure tests running on Chrome
    desktop, and assumes that environment variable 'LOCAL_PERF_DIR' has been
    specified and refers to a valid directory on the local machine.

    Args:
      description: A string description of the event.  Should not include
          spaces.
      event_list: A list of (x, y) tuples representing one or more events
          occurring during an endurance test, where 'x' is the time of the
          event (in seconds since the start of the test), and 'y' is a
          dictionary representing relevant data associated with that event
          (as key/value pairs).
    """
    revision_num_file = os.path.join(self._local_perf_dir, 'last_revision.dat')
    if os.path.exists(revision_num_file):
      with open(revision_num_file) as f:
        revision = int(f.read())
    else:
      revision = 0

    if not self._seen_graph_lines:
      # We're about to output data for a new test run.
      revision += 1

    existing_lines = []
    data_file = os.path.join(self._local_perf_dir, '_EVENT_-summary.dat')
    if os.path.exists(data_file):
      with open(data_file) as f:
        existing_lines = f.readlines()
    # Lines in this file are written with str() below, not as JSON, so they
    # must be parsed back with eval().
    existing_lines = [eval(line.strip()) for line in existing_lines]

    seen_event_type = description
    value_list = []
    if seen_event_type in self._seen_graph_lines:
      # We've added events previously for this event type in the current
      # test execution, so retrieve the original set of values specified in
      # the most recent revision in the data file.
      value_list = existing_lines[0]['events'][description]
    for event_time, event_data in event_list:
      value_list.append([str(event_time), event_data])
    new_events = {
      description: value_list
    }

    new_line = {
      'rev': revision,
      'events': new_events
    }

    if seen_event_type in self._seen_graph_lines:
      # Update results for the most recent revision.
      existing_lines[0] = new_line
    else:
      # New results for a new revision.
      existing_lines.insert(0, new_line)
      self._seen_graph_lines[seen_event_type] = True

    existing_lines = [str(line) for line in existing_lines]
    with open(data_file, 'w') as f:
      f.write('\n'.join(existing_lines))
    os.chmod(data_file, 0755)

    with open(revision_num_file, 'w') as f:
      f.write(str(revision))

  def _OutputEventGraphValue(self, description, event_list):
    """Outputs a set of events to have them graphed on the Chrome Endure bots.

    An "event" can be anything recorded by a performance test that occurs at
    particular times during a test execution.  For example, a garbage
    collection in the v8 heap can be considered an event.  An event is
    distinguished from a regular perf measurement in two ways: (1) an event is
    depicted differently in the performance graphs than performance
    measurements; (2) an event can be associated with zero or more data fields
    describing relevant information associated with the event.  For example, a
    garbage collection event will occur at a particular time, and it may be
    associated with data such as the number of collected bytes and/or the
    length of time it took to perform the garbage collection.

    This function only applies to Chrome Endure tests running on Chrome
    desktop.

    Args:
      description: A string description of the event.  Should not include
          spaces.
      event_list: A list of (x, y) tuples representing one or more events
          occurring during an endurance test, where 'x' is the time of the
          event (in seconds since the start of the test), and 'y' is a
          dictionary representing relevant data associated with that event
          (as key/value pairs).
    """
    pyauto_utils.PrintPerfResult('_EVENT_', description, event_list, '')
    if self._local_perf_dir:
      self._OutputEventForStandaloneGraphing(description, event_list)

  def _PrintSummaryResults(self, description, values, units, graph_name):
    """Logs summary measurement information.

    This function computes and outputs the average and standard deviation of
    the specified list of value measurements.  It also invokes
    _OutputPerfGraphValue() with the computed *average* value, to ensure the
    average value can be plotted in a performance graph.

    Args:
      description: A string description for the specified results.
      values: A list of numeric value measurements.
      units: A string specifying the units for the specified measurements.
      graph_name: A string name for the graph associated with this performance
          value.  Only used on Chrome desktop.
    """
    logging.info('Overall results for: %s', description)
    if values:
      logging.info('  Average: %f %s', Mean(values), units)
      logging.info('  Std dev: %f %s', StandardDeviation(values), units)
      self._OutputPerfGraphValue(description, values, units, graph_name)
    else:
      logging.info('No results to report.')

  def _RunNewTabTest(self, description, open_tab_command, graph_name,
                     num_tabs=1):
    """Runs a perf test that involves opening new tab(s).

    This helper function can be called from different tests to do perf testing
    with different types of tabs.  It is assumed that the |open_tab_command|
    will open up a single tab.

    Args:
      description: A string description of the associated tab test.
      open_tab_command: A callable that will open a single tab.
      graph_name: A string name for the performance graph associated with this
          test.  Only used on Chrome desktop.
      num_tabs: The number of tabs to open, i.e., the number of times to
          invoke the |open_tab_command|.
    """
    assert callable(open_tab_command)

    timings = []
    for iteration in range(self._num_iterations + 1):
      orig_timeout_count = self._timeout_count
      elapsed_time = self._MeasureElapsedTime(open_tab_command,
                                              num_invocations=num_tabs)
      # Only count the timing measurement if no automation call timed out.
      if self._timeout_count == orig_timeout_count:
        # Ignore the first iteration.
        if iteration:
          timings.append(elapsed_time)
          logging.info('Iteration %d of %d: %f milliseconds', iteration,
                       self._num_iterations, elapsed_time)
      self.assertTrue(self._timeout_count <= self._max_timeout_count,
                      msg='Test exceeded automation timeout threshold.')
      self.assertEqual(1 + num_tabs, self.GetTabCount(),
                       msg='Did not open %d new tab(s).' % num_tabs)
      for _ in range(num_tabs):
        self.CloseTab(tab_index=1)

    self._PrintSummaryResults(description, timings, 'milliseconds', graph_name)

  def _GetConfig(self):
    """Loads the perf test configuration file.

    Returns:
      A dictionary that represents the config information.
    """
    config_file = os.path.join(os.path.dirname(__file__), 'perf.cfg')
    config = {'username': None,
              'password': None,
              'google_account_url': 'https://accounts.google.com/',
              'gmail_url': 'https://www.gmail.com',
              'plus_url': 'https://plus.google.com',
              'docs_url': 'https://docs.google.com'}
    if os.path.exists(config_file):
      try:
        new_config = pyauto.PyUITest.EvalDataFrom(config_file)
        for key in new_config:
          if new_config.get(key) is not None:
            config[key] = new_config.get(key)
      except SyntaxError, e:
        logging.info('Could not read %s: %s', config_file, str(e))
    return config

  def _LoginToGoogleAccount(self, account_key='test_google_account'):
    """Logs in to a test Google account.

    Logs in with user-defined credentials if they exist; otherwise logs in
    with private test credentials if they exist; otherwise fails.

    Args:
      account_key: The string key in private_tests_info.txt which is
                   associated with the test account login credentials to use.
                   It is only used when user-defined credentials cannot be
                   loaded.

    Raises:
      RuntimeError: If no credential information could be found.
    """
    private_file = os.path.join(pyauto.PyUITest.DataDir(), 'pyauto_private',
                                'private_tests_info.txt')
    config_file = os.path.join(os.path.dirname(__file__), 'perf.cfg')
    config = self._GetConfig()
    google_account_url = config.get('google_account_url')
    username = config.get('username')
    password = config.get('password')
    if username and password:
      logging.info('Using Google account credentials from %s', config_file)
    elif os.path.exists(private_file):
      creds = self.GetPrivateInfo()[account_key]
      username = creds['username']
      password = creds['password']
      logging.info('User-defined credentials not found; using private test '
                   'credentials instead.')
    else:
      raise RuntimeError('No user-defined or private test credentials could '
                         'be found.  Please specify credential information '
                         'in %s.' % config_file)
    test_utils.GoogleAccountsLogin(
        self, username, password, url=google_account_url)
    self.NavigateToURL('about:blank')  # Clear the existing tab.

  def _GetCPUUsage(self):
    """Returns the machine's CPU usage.

    This function uses /proc/stat to identify CPU usage, and therefore works
    only on Linux/ChromeOS.

    Returns:
      A dictionary with 'user', 'nice', 'system' and 'idle' values.
      Sample dictionary:
      {
        'user': 254544,
        'nice': 9,
        'system': 254768,
        'idle': 2859878,
      }
    """
    try:
      with open('/proc/stat') as f:
        cpu_usage_str = f.readline().split()
    except IOError, e:
      self.fail('Could not retrieve CPU usage: ' + str(e))
    return {
      'user': int(cpu_usage_str[1]),
      'nice': int(cpu_usage_str[2]),
      'system': int(cpu_usage_str[3]),
      'idle': int(cpu_usage_str[4])
    }

  def _GetFractionNonIdleCPUTime(self, cpu_usage_start, cpu_usage_end):
    """Computes the fraction of CPU time spent non-idling.

    This function should be invoked using before/after values from calls to
    _GetCPUUsage().
    """
    time_non_idling_end = (cpu_usage_end['user'] + cpu_usage_end['nice'] +
                           cpu_usage_end['system'])
    time_non_idling_start = (cpu_usage_start['user'] + cpu_usage_start['nice'] +
                             cpu_usage_start['system'])
    total_time_end = (cpu_usage_end['user'] + cpu_usage_end['nice'] +
                      cpu_usage_end['system'] + cpu_usage_end['idle'])
    total_time_start = (cpu_usage_start['user'] + cpu_usage_start['nice'] +
                        cpu_usage_start['system'] + cpu_usage_start['idle'])
    return ((float(time_non_idling_end) - time_non_idling_start) /
            (total_time_end - total_time_start))
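  # Worked example with made-up /proc/stat snapshots: if user + nice + system
  # grew by 50 jiffies over an interval in which user + nice + system + idle
  # grew by 200, the CPU was 50 / 200 = 0.25 (25%) non-idle.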

  def ExtraChromeFlags(self):
    """Ensures Chrome is launched with custom flags.

    Returns:
      A list of extra flags to pass to Chrome when it is launched.
    """
    flags = super(BasePerfTest, self).ExtraChromeFlags()
    # Window size impacts a variety of perf tests; fix it to ensure
    # consistency.
    flags.append('--window-size=1024,768')
    if self._IsPGOMode():
      flags = flags + ['--child-clean-exit', '--no-sandbox']
    return flags


class TabPerfTest(BasePerfTest):
  """Tests that involve opening tabs."""

  def testNewTab(self):
    """Measures time to open a new tab."""
    self._RunNewTabTest('NewTabPage',
                        lambda: self._AppendTab('chrome://newtab'), 'open_tab')

  def testNewTabFlash(self):
    """Measures time to open a new tab navigated to a Flash page."""
    self.assertTrue(
        os.path.exists(os.path.join(self.ContentDataDir(), 'plugin',
                                    'flash.swf')),
        msg='Missing required Flash data file.')
    url = self.GetFileURLForContentDataPath('plugin', 'flash.swf')
    self._RunNewTabTest('NewTabFlashPage', lambda: self._AppendTab(url),
                        'open_tab')

  def test20Tabs(self):
    """Measures time to open 20 tabs."""
    self._RunNewTabTest('20TabsNewTabPage',
                        lambda: self._AppendTab('chrome://newtab'),
                        'open_20_tabs', num_tabs=20)


class BenchmarkPerfTest(BasePerfTest):
  """Benchmark performance tests."""

  def testV8BenchmarkSuite(self):
    """Measures score from the v8 benchmark suite."""
    url = self.GetFileURLForDataPath('v8_benchmark_v6', 'run.html')

    def _RunBenchmarkOnce(url):
      """Runs the v8 benchmark suite once and returns the results in a dict."""
      self.assertTrue(self.AppendTab(pyauto.GURL(url)),
                      msg='Failed to append tab for v8 benchmark suite.')
      js_done = """
          var val = document.getElementById("status").innerHTML;
          window.domAutomationController.send(val);
      """
      self.assertTrue(
          self.WaitUntil(
              lambda: 'Score:' in self.ExecuteJavascript(js_done, tab_index=1),
              timeout=300, expect_retval=True, retry_sleep=1),
          msg='Timed out when waiting for v8 benchmark score.')

      js_get_results = """
          var result = {};
          result['final_score'] = document.getElementById("status").innerHTML;
          result['all_results'] = document.getElementById("results").innerHTML;
          window.domAutomationController.send(JSON.stringify(result));
      """
      # The JavaScript sends JSON, so parse it as JSON rather than eval()ing.
      results = simplejson.loads(
          self.ExecuteJavascript(js_get_results, tab_index=1))
      score_pattern = r'(\w+): (\d+)'
      final_score = re.search(score_pattern, results['final_score']).group(2)
      result_dict = {'final_score': int(final_score)}
      for match in re.finditer(score_pattern, results['all_results']):
        benchmark_name = match.group(1)
        benchmark_score = match.group(2)
        result_dict[benchmark_name] = int(benchmark_score)
      self.CloseTab(tab_index=1)
      return result_dict

    timings = {}
    for iteration in xrange(self._num_iterations + 1):
      result_dict = _RunBenchmarkOnce(url)
      # Ignore the first iteration.
      if iteration:
        for key, val in result_dict.items():
          timings.setdefault(key, []).append(val)
        logging.info('Iteration %d of %d:\n%s', iteration,
                     self._num_iterations, self.pformat(result_dict))

    for key, val in timings.items():
      if key == 'final_score':
        self._PrintSummaryResults('V8Benchmark', val, 'score',
                                  'v8_benchmark_final')
      else:
        self._PrintSummaryResults('V8Benchmark-%s' % key, val, 'score',
                                  'v8_benchmark_individual')

  def testSunSpider(self):
    """Runs the SunSpider javascript benchmark suite."""
    url = self.GetFileURLForDataPath('sunspider', 'sunspider-driver.html')
    self.assertTrue(self.AppendTab(pyauto.GURL(url)),
                    msg='Failed to append tab for SunSpider benchmark suite.')

    js_is_done = """
        var done = false;
        if (document.getElementById("console"))
          done = true;
        window.domAutomationController.send(JSON.stringify(done));
    """
    self.assertTrue(
        self.WaitUntil(
            lambda: self.ExecuteJavascript(js_is_done, tab_index=1),
            timeout=300, expect_retval='true', retry_sleep=1),
        msg='Timed out when waiting for SunSpider benchmark score.')

    js_get_results = """
        window.domAutomationController.send(
            document.getElementById("console").innerHTML);
    """
    # Append '<br>' to the result to simplify regular expression matching.
    results = self.ExecuteJavascript(js_get_results, tab_index=1) + '<br>'
    total = re.search(r'Total:\s*([\d.]+)ms', results).group(1)
    logging.info('Total: %f ms', float(total))
    self._OutputPerfGraphValue('SunSpider-total', float(total), 'ms',
                               'sunspider_total')

    for match_category in re.finditer(r'\s\s(\w+):\s*([\d.]+)ms.+?<br><br>',
                                      results):
      category_name = match_category.group(1)
      category_result = match_category.group(2)
      logging.info('Benchmark "%s": %f ms', category_name,
                   float(category_result))
      self._OutputPerfGraphValue('SunSpider-' + category_name,
                                 float(category_result), 'ms',
                                 'sunspider_individual')

      for match_result in re.finditer(r'<br>\s\s\s\s([\w-]+):\s*([\d.]+)ms',
                                      match_category.group(0)):
        result_name = match_result.group(1)
        result_value = match_result.group(2)
        logging.info('  Result "%s-%s": %f ms', category_name, result_name,
                     float(result_value))
        self._OutputPerfGraphValue(
            'SunSpider-%s-%s' % (category_name, result_name),
            float(result_value), 'ms', 'sunspider_individual')

  def testDromaeoSuite(self):
    """Measures results from the Dromaeo benchmark suite."""
    url = self.GetFileURLForDataPath('dromaeo', 'index.html')
    self.assertTrue(self.AppendTab(pyauto.GURL(url + '?dromaeo')),
                    msg='Failed to append tab for Dromaeo benchmark suite.')

    js_is_ready = """
        var val = document.getElementById('pause').value;
        window.domAutomationController.send(val);
    """
    self.assertTrue(
        self.WaitUntil(
            lambda: self.ExecuteJavascript(js_is_ready, tab_index=1),
            timeout=30, expect_retval='Run', retry_sleep=1),
        msg='Timed out when waiting for Dromaeo benchmark to load.')

    js_run = """
        $('#pause').val('Run').click();
        window.domAutomationController.send('done');
    """
    self.ExecuteJavascript(js_run, tab_index=1)

    js_is_done = """
        var val = document.getElementById('timebar').innerHTML;
        window.domAutomationController.send(val);
    """
    self.assertTrue(
        self.WaitUntil(
            lambda: 'Total' in self.ExecuteJavascript(js_is_done, tab_index=1),
            timeout=900, expect_retval=True, retry_sleep=2),
        msg='Timed out when waiting for Dromaeo benchmark to complete.')

    js_get_results = """
        var result = {};
        result['total_result'] = $('#timebar strong').html();
        result['all_results'] = {};
        $('.result-item.done').each(function (i) {
            var group_name = $(this).find('.test b').html().replace(':', '');
            var group_results = {};
            group_results['result'] =
                $(this).find('span').html().replace('runs/s', '');

            group_results['sub_groups'] = {};
            $(this).find('li').each(function (i) {
                var sub_name = $(this).find('b').html().replace(':', '');
                group_results['sub_groups'][sub_name] =
                    $(this).text().match(/: ([\d.]+)/)[1];
            });
            result['all_results'][group_name] = group_results;
        });
        window.domAutomationController.send(JSON.stringify(result));
    """
    # The JavaScript sends JSON, so parse it as JSON rather than eval()ing.
    results = simplejson.loads(
        self.ExecuteJavascript(js_get_results, tab_index=1))
    total_result = results['total_result']
    logging.info('Total result: %s', total_result)
    self._OutputPerfGraphValue('Dromaeo-total', float(total_result),
                               'runsPerSec', 'dromaeo_total')

    for group_name, group in results['all_results'].iteritems():
      logging.info('Benchmark "%s": %s', group_name, group['result'])
      self._OutputPerfGraphValue('Dromaeo-' + group_name.replace(' ', ''),
                                 float(group['result']), 'runsPerSec',
                                 'dromaeo_individual')
      for benchmark_name, benchmark_score in group['sub_groups'].iteritems():
        logging.info('  Result "%s": %s', benchmark_name, benchmark_score)

  def testSpaceport(self):
    """Measures results from the Spaceport benchmark suite."""
    # TODO(tonyg): Test is failing on bots. Diagnose and re-enable.
    pass

#    url = self.GetFileURLForDataPath('third_party', 'spaceport', 'index.html')
#    self.assertTrue(self.AppendTab(pyauto.GURL(url + '?auto')),
#                    msg='Failed to append tab for Spaceport benchmark suite.')
#
#    # The test reports results to console.log in the format "name: value".
#    # Inject a bit of JS to intercept those.
#    js_collect_console_log = """
#        window.__pyautoresult = {};
#        window.console.log = function(str) {
#            if (!str) return;
#            var key_val = str.split(': ');
#            if (key_val.length != 2) return;
#            __pyautoresult[key_val[0]] = key_val[1];
#        };
#        window.domAutomationController.send('done');
#    """
#    self.ExecuteJavascript(js_collect_console_log, tab_index=1)
#
#    def _IsDone():
#      expected_num_results = 30  # The number of tests in the benchmark.
#      results = eval(self.ExecuteJavascript(js_get_results, tab_index=1))
#      return expected_num_results == len(results)
#
#    js_get_results = """
#        window.domAutomationController.send(
#            JSON.stringify(window.__pyautoresult));
#    """
#    self.assertTrue(
#        self.WaitUntil(_IsDone, timeout=1200, expect_retval=True,
#                       retry_sleep=5),
#        msg='Timed out when waiting for Spaceport benchmark to complete.')
#    results = eval(self.ExecuteJavascript(js_get_results, tab_index=1))
#
#    for key in results:
#      suite, test = key.split('.')
#      value = float(results[key])
#      self._OutputPerfGraphValue(test, value, 'ObjectsAt30FPS', suite)
#    self._PrintSummaryResults('Overall', [float(x) for x in results.values()],
#                              'ObjectsAt30FPS', 'Overall')


class LiveWebappLoadTest(BasePerfTest):
  """Tests that involve performance measurements of live webapps.

  These tests connect to live webpages (e.g., Gmail, Calendar, Docs) and are
  therefore subject to network conditions.  These tests are meant to generate
  "ball-park" numbers only (to see roughly how long things take to occur from a
  user's perspective), and are not expected to be precise.
  """

  def testNewTabGmail(self):
    """Measures time to open a tab to a logged-in Gmail account.

    Timing starts right before the new tab is opened, and stops as soon as the
    webpage displays the substring 'Last account activity:'.
    """
    EXPECTED_SUBSTRING = 'Last account activity:'

    def _SubstringExistsOnPage():
      js = """
          var frame = document.getElementById("canvas_frame");
          var divs = frame.contentDocument.getElementsByTagName("div");
          for (var i = 0; i < divs.length; ++i) {
            if (divs[i].innerHTML.indexOf("%s") >= 0)
              window.domAutomationController.send("true");
          }
          window.domAutomationController.send("false");
      """ % EXPECTED_SUBSTRING
      return self.ExecuteJavascript(js, tab_index=1)

    def _RunSingleGmailTabOpen():
      self._AppendTab('http://www.gmail.com')
      self.assertTrue(self.WaitUntil(_SubstringExistsOnPage, timeout=120,
                                     expect_retval='true', retry_sleep=0.10),
                      msg='Timed out waiting for expected Gmail string.')

    self._LoginToGoogleAccount()
    self._RunNewTabTest('NewTabGmail', _RunSingleGmailTabOpen,
                        'open_tab_live_webapp')

  def testNewTabCalendar(self):
    """Measures time to open a tab to a logged-in Calendar account.

    Timing starts right before the new tab is opened, and stops as soon as the
    webpage displays a div whose content is exactly 'Month'.
    """
    EXPECTED_SUBSTRING = 'Month'

    def _DivTitleStartsWith():
      js = """
          var divs = document.getElementsByTagName("div");
          for (var i = 0; i < divs.length; ++i) {
            if (divs[i].innerHTML == "%s")
              window.domAutomationController.send("true");
          }
          window.domAutomationController.send("false");
      """ % EXPECTED_SUBSTRING
      return self.ExecuteJavascript(js, tab_index=1)

    def _RunSingleCalendarTabOpen():
      self._AppendTab('http://calendar.google.com')
      self.assertTrue(self.WaitUntil(_DivTitleStartsWith, timeout=120,
                                     expect_retval='true', retry_sleep=0.10),
                      msg='Timed out waiting for expected Calendar string.')

    self._LoginToGoogleAccount()
    self._RunNewTabTest('NewTabCalendar', _RunSingleCalendarTabOpen,
                        'open_tab_live_webapp')

  def testNewTabDocs(self):
    """Measures time to open a tab to a logged-in Docs account.

    Timing starts right before the new tab is opened, and stops as soon as the
    webpage displays the expected substring 'sort' (case insensitive).
    """
    EXPECTED_SUBSTRING = 'sort'

    def _SubstringExistsOnPage():
      js = """
          var divs = document.getElementsByTagName("div");
          for (var i = 0; i < divs.length; ++i) {
            if (divs[i].innerHTML.toLowerCase().indexOf("%s") >= 0)
              window.domAutomationController.send("true");
          }
          window.domAutomationController.send("false");
      """ % EXPECTED_SUBSTRING
      return self.ExecuteJavascript(js, tab_index=1)

    def _RunSingleDocsTabOpen():
      self._AppendTab('http://docs.google.com')
      self.assertTrue(self.WaitUntil(_SubstringExistsOnPage, timeout=120,
                                     expect_retval='true', retry_sleep=0.10),
                      msg='Timed out waiting for expected Docs string.')

    self._LoginToGoogleAccount()
    self._RunNewTabTest('NewTabDocs', _RunSingleDocsTabOpen,
                        'open_tab_live_webapp')


class NetflixPerfTest(BasePerfTest, NetflixTestHelper):
  """Tests Netflix video performance."""

  def __init__(self, methodName='runTest', **kwargs):
    pyauto.PyUITest.__init__(self, methodName, **kwargs)
    NetflixTestHelper.__init__(self, self)

  def tearDown(self):
    self.SignOut()
    pyauto.PyUITest.tearDown(self)

  def testNetflixDroppedFrames(self):
    """Measures the Netflix video dropped frames/second.  Runs for 60 secs."""
    self.LoginAndStartPlaying()
    self.CheckNetflixPlaying(self.IS_PLAYING,
                             'Player did not start playing the title.')
    # Ignore the first 10 seconds of playback so that we measure smooth video
    # playback.
    time.sleep(10)
    init_dropped_frames = self._GetVideoDroppedFrames()
    dropped_frames = []
    prev_dropped_frames = 0
    for iteration in xrange(60):
      # Exclude the dropped frames from the initial 10 seconds.
      total_dropped_frames = self._GetVideoDroppedFrames() - init_dropped_frames
      dropped_frames_last_sec = total_dropped_frames - prev_dropped_frames
      dropped_frames.append(dropped_frames_last_sec)
      logging.info('Iteration %d of %d: %f dropped frames in the last second',
                   iteration + 1, 60, dropped_frames_last_sec)
      prev_dropped_frames = total_dropped_frames
      # Play the video for some time.
      time.sleep(1)
    self._PrintSummaryResults('NetflixDroppedFrames', dropped_frames, 'frames',
                              'netflix_dropped_frames')

  def testNetflixCPU(self):
    """Measures the Netflix video CPU usage.  Runs for 60 seconds."""
    self.LoginAndStartPlaying()
    self.CheckNetflixPlaying(self.IS_PLAYING,
                             'Player did not start playing the title.')
    # Ignore the first 10 seconds of playback so that we measure smooth video
    # playback.
    time.sleep(10)
    init_dropped_frames = self._GetVideoDroppedFrames()
    init_video_frames = self._GetVideoFrames()
    cpu_usage_start = self._GetCPUUsage()
    # Play the video for some time.
    time.sleep(60)
    total_video_frames = self._GetVideoFrames() - init_video_frames
    total_dropped_frames = self._GetVideoDroppedFrames() - init_dropped_frames
    cpu_usage_end = self._GetCPUUsage()
    fraction_non_idle_time = self._GetFractionNonIdleCPUTime(
        cpu_usage_start, cpu_usage_end)
    # Extrapolate the measured CPU utilization to estimate the utilization
    # needed to play all frames, including the dropped ones.
    extrapolation_value = (fraction_non_idle_time *
        (float(total_video_frames) + total_dropped_frames) /
        total_video_frames)
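    # Illustrative numbers: 25% non-idle CPU with 1800 shown frames and 200
    # dropped frames extrapolates to 0.25 * 2000 / 1800 ~= 0.278.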
   1168     logging.info('Netflix CPU extrapolation: %f', extrapolation_value)
   1169     self._OutputPerfGraphValue('NetflixCPUExtrapolation', extrapolation_value,
   1170                                'extrapolation', 'netflix_cpu_extrapolation')
   1171 
   1172 
   1173 class YoutubePerfTest(BasePerfTest, YoutubeTestHelper):
   1174   """Test Youtube video performance."""
   1175 
   1176   def __init__(self, methodName='runTest', **kwargs):
   1177     pyauto.PyUITest.__init__(self, methodName, **kwargs)
   1178     YoutubeTestHelper.__init__(self, self)
   1179 
   1180   def _VerifyVideoTotalBytes(self):
   1181     """Returns true if video total bytes information is available."""
   1182     return self.GetVideoTotalBytes() > 0
   1183 
   1184   def _VerifyVideoLoadedBytes(self):
   1185     """Returns true if video loaded bytes information is available."""
   1186     return self.GetVideoLoadedBytes() > 0
   1187 
   1188   def StartVideoForPerformance(self, video_id='zuzaxlddWbk'):
   1189     """Start the test video with all required buffering."""
   1190     self.PlayVideoAndAssert(video_id)
   1191     self.ExecuteJavascript("""
   1192         ytplayer.setPlaybackQuality('hd720');
   1193         window.domAutomationController.send('');
   1194     """)
   1195     self.AssertPlayerState(state=self.is_playing,
   1196                            msg='Player did not enter the playing state')
   1197     self.assertTrue(
   1198         self.WaitUntil(self._VerifyVideoTotalBytes, expect_retval=True),
   1199         msg='Failed to get video total bytes information.')
   1200     self.assertTrue(
   1201         self.WaitUntil(self._VerifyVideoLoadedBytes, expect_retval=True),
   1202         msg='Failed to get video loaded bytes information')
   1203     loaded_video_bytes = self.GetVideoLoadedBytes()
   1204     total_video_bytes = self.GetVideoTotalBytes()
   1205     self.PauseVideo()
   1206     logging.info('total_video_bytes: %f', total_video_bytes)
   1207     # Wait for the video to finish loading.
   1208     while total_video_bytes > loaded_video_bytes:
   1209       loaded_video_bytes = self.GetVideoLoadedBytes()
   1210       logging.info('loaded_video_bytes: %f', loaded_video_bytes)
   1211       time.sleep(1)
   1212     self.PlayVideo()
   1213     # Ignore the first 10 seconds of playback so that the video plays smoothly.
   1214     time.sleep(10)
   1215 
   1216   def testYoutubeDroppedFrames(self):
   1217     """Measures the Youtube video dropped frames/second. Runs for 60 secs.
   1218 
   1219     This test measures Youtube video dropped frames for three types of
   1220     video: slow, normal, and fast motion.
   1221     """
   1222     youtube_video = {'Slow': 'VT1-sitWRtY',
   1223                      'Normal': '2tqK_3mKQUw',
   1224                      'Fast': '8ETDE0VGJY4',
   1225                     }
   1226     for video_type in youtube_video:
   1227       logging.info('Running %s video.', video_type)
   1228       self.StartVideoForPerformance(youtube_video[video_type])
   1229       init_dropped_frames = self.GetVideoDroppedFrames()
   1230       total_dropped_frames = 0
   1231       dropped_fps = []
   1232       for iteration in xrange(60):
   1233         frames = self.GetVideoDroppedFrames() - init_dropped_frames
   1234         current_dropped_frames = frames - total_dropped_frames
   1235         dropped_fps.append(current_dropped_frames)
   1236         logging.info('Iteration %d of %d: %f dropped frames in the last '
   1237                      'second', iteration + 1, 60, current_dropped_frames)
   1238         total_dropped_frames = frames
   1239         # Play the video for some time.
   1240         time.sleep(1)
   1241       graph_description = 'YoutubeDroppedFrames' + video_type
   1242       self._PrintSummaryResults(graph_description, dropped_fps, 'frames',
   1243                                 'youtube_dropped_frames')
   1244 
   1245   def testYoutubeCPU(self):
   1246     """Measures the Youtube video CPU usage. Runs for 60 seconds.
   1247 
   1248     Measures the Youtube video CPU usage (between 0 and 1), extrapolated
   1249     to the total frame count by taking dropped frames into account. For
   1250     smooth video playback this should be below 0.5-1.0 on a hyperthreaded CPU.
   1251     """
   1252     self.StartVideoForPerformance()
   1253     init_dropped_frames = self.GetVideoDroppedFrames()
   1254     logging.info('init_dropped_frames: %f', init_dropped_frames)
   1255     cpu_usage_start = self._GetCPUUsage()
   1256     total_shown_frames = 0
   1257     for sec_num in xrange(60):
   1258       # Play the video for some time.
   1259       time.sleep(1)
   1260       total_shown_frames = total_shown_frames + self.GetVideoFrames()
   1261       logging.info('total_shown_frames: %f', total_shown_frames)
   1262     total_dropped_frames = self.GetVideoDroppedFrames() - init_dropped_frames
   1263     logging.info('total_dropped_frames: %f', total_dropped_frames)
   1264     cpu_usage_end = self._GetCPUUsage()
   1265     fraction_non_idle_time = self._GetFractionNonIdleCPUTime(
   1266         cpu_usage_start, cpu_usage_end)
   1267     logging.info('fraction_non_idle_time: %f', fraction_non_idle_time)
   1268     total_frames = total_shown_frames + total_dropped_frames
   1269     # Extrapolate the CPU utilization to what would be needed to show every frame.
   1270     extrapolation_value = (fraction_non_idle_time *
   1271                            (float(total_frames) / total_shown_frames))
   1272     logging.info('Youtube CPU extrapolation: %f', extrapolation_value)
   1273     # Video is still running so log some more detailed data.
   1274     self._LogProcessActivity()
   1275     self._OutputPerfGraphValue('YoutubeCPUExtrapolation', extrapolation_value,
   1276                                'extrapolation', 'youtube_cpu_extrapolation')
   1277 
   1278 
   1279 class FlashVideoPerfTest(BasePerfTest):
   1280   """General flash video performance tests."""
   1281 
   1282   def FlashVideo1080P(self):
   1283     """Measures total dropped frames and average FPS for a 1080p flash video.
   1284 
   1285     This is a temporary test to be run manually for now, needed to collect some
   1286     performance statistics across different ChromeOS devices.
   1287     """
   1288     # Open up the test webpage; it's assumed the test will start automatically.
   1289     webpage_url = 'http://www/~arscott/fl/FlashVideoTests.html'
   1290     self.assertTrue(self.AppendTab(pyauto.GURL(webpage_url)),
   1291                     msg='Failed to append tab for webpage.')
   1292 
   1293     # Wait until the test is complete.
   1294     js_is_done = """
   1295         window.domAutomationController.send(JSON.stringify(tests_done));
   1296     """
   1297     self.assertTrue(
   1298         self.WaitUntil(
   1299             lambda: self.ExecuteJavascript(js_is_done, tab_index=1) == 'true',
   1300             timeout=300, expect_retval=True, retry_sleep=1),
   1301         msg='Timed out when waiting for test result.')
   1302 
   1303     # Retrieve and output the test results.
   1304     js_results = """
   1305         window.domAutomationController.send(JSON.stringify(tests_results));
   1306     """
   1307     test_result = eval(self.ExecuteJavascript(js_results, tab_index=1))
   1308     test_result[0] = test_result[0].replace('true', 'True')
   1309     test_result = eval(test_result[0])  # Webpage only does 1 test right now.
   1310 
   1311     description = 'FlashVideo1080P'
   1312     result = test_result['averageFPS']
   1313     logging.info('Result for %s: %f FPS (average)', description, result)
   1314     self._OutputPerfGraphValue(description, result, 'FPS',
   1315                                'flash_video_1080p_fps')
   1316     result = test_result['droppedFrames']
   1317     logging.info('Result for %s: %f dropped frames', description, result)
   1318     self._OutputPerfGraphValue(description, result, 'DroppedFrames',
   1319                                'flash_video_1080p_dropped_frames')
   1320 
   1321 
   1322 class WebGLTest(BasePerfTest):
   1323   """Tests for WebGL performance."""
   1324 
   1325   def _RunWebGLTest(self, url, description, graph_name):
   1326     """Measures FPS using a specified WebGL demo.
   1327 
   1328     Args:
   1329       url: The string URL that, once loaded, will run the WebGL demo (default
   1330           WebGL demo settings are used, since this test does not modify any
   1331           settings in the demo).
   1332       description: A string description for this demo, used as a performance
   1333           value description.  Should not contain any spaces.
   1334       graph_name: A string name for the performance graph associated with this
   1335           test.  Only used on Chrome desktop.
   1336     """
   1337     self.assertTrue(self.AppendTab(pyauto.GURL(url)),
   1338                     msg='Failed to append tab for %s.' % description)
   1339 
   1340     get_fps_js = """
   1341       var fps_field = document.getElementById("fps");
   1342       var result = -1;
   1343       if (fps_field)
   1344         result = fps_field.innerHTML;
   1345       window.domAutomationController.send(JSON.stringify(result));
   1346     """
   1347 
   1348     # Wait until we start getting FPS values.
   1349     self.assertTrue(
   1350         self.WaitUntil(
   1351             lambda: self.ExecuteJavascript(get_fps_js, tab_index=1) != '-1',
   1352             timeout=300, retry_sleep=1),
   1353         msg='Timed out when waiting for FPS values to be available.')
   1354 
   1355     # Let the experiment run for 5 seconds before we start collecting perf
   1356     # measurements.
   1357     time.sleep(5)
   1358 
   1359     # Collect the current FPS value each second for the next 30 seconds.  The
   1360     # final result of this test will be the average of these FPS values.
   1361     fps_vals = []
   1362     for iteration in xrange(30):
   1363       fps = self.ExecuteJavascript(get_fps_js, tab_index=1)
   1364       fps = float(fps.replace('"', ''))
   1365       fps_vals.append(fps)
   1366       logging.info('Iteration %d of %d: %f FPS', iteration + 1, 30, fps)
   1367       time.sleep(1)
   1368     self._PrintSummaryResults(description, fps_vals, 'fps', graph_name)
   1369 
   1370   def testWebGLAquarium(self):
   1371     """Measures performance using the WebGL Aquarium demo."""
   1372     self._RunWebGLTest(
   1373         self.GetFileURLForDataPath('pyauto_private', 'webgl', 'aquarium',
   1374                                    'aquarium.html'),
   1375         'WebGLAquarium', 'webgl_demo')
   1376 
   1377   def testWebGLField(self):
   1378     """Measures performance using the WebGL Field demo."""
   1379     self._RunWebGLTest(
   1380         self.GetFileURLForDataPath('pyauto_private', 'webgl', 'field',
   1381                                    'field.html'),
   1382         'WebGLField', 'webgl_demo')
   1383 
   1384   def testWebGLSpaceRocks(self):
   1385     """Measures performance using the WebGL SpaceRocks demo."""
   1386     self._RunWebGLTest(
   1387         self.GetFileURLForDataPath('pyauto_private', 'webgl', 'spacerocks',
   1388                                    'spacerocks.html'),
   1389         'WebGLSpaceRocks', 'webgl_demo')
   1390 
   1391 
   1392 class GPUPerfTest(BasePerfTest):
   1393   """Tests for GPU performance."""
   1394 
   1395   def setUp(self):
   1396     """Performs necessary setup work before running each test in this class."""
   1397     self._gpu_info_dict = self.EvalDataFrom(os.path.join(self.DataDir(),
   1398                                             'gpu', 'gpuperf.txt'))
   1399     self._demo_name_url_dict = self._gpu_info_dict['demo_info']
   1400     pyauto.PyUITest.setUp(self)
   1401 
   1402   def _MeasureFpsOverTime(self, tab_index=0):
   1403     """Measures FPS using a specified demo.
   1404 
   1405     This function assumes that the demo is already loaded in the specified tab
   1406     index.
   1407 
   1408     Args:
   1409       tab_index: The tab index, default is 0.
   1410     """
   1411     # Let the experiment run for 5 seconds before we start collecting FPS
   1412     # values.
   1413     time.sleep(5)
   1414 
   1415     # Collect the current FPS value each second for the next 10 seconds.
   1416     # Then return the average FPS value from among those collected.
   1417     fps_vals = []
   1418     for iteration in xrange(10):
   1419       fps = self.GetFPS(tab_index=tab_index)
   1420       fps_vals.append(fps['fps'])
   1421       time.sleep(1)
   1422     return Mean(fps_vals)
   1423 
   1424   def _GetStdAvgAndCompare(self, avg_fps, description, ref_dict):
   1425     """Compares an average FPS value against platform-specific reference data.
   1426 
   1427     Args:
   1428       avg_fps: Average fps value.
   1429       description: A string description for this demo, used as a performance
   1430                    value description.
   1431       ref_dict: Dictionary which contains reference data for this test case.
   1432 
   1433     Returns:
   1434       True if the average FPS is at least 90% of the reference FPS value,
   1435       or False otherwise.
   1436     """
   1438     status = True
   1439     # Load reference data according to platform.
   1440     platform_ref_dict = None
   1441     if self.IsWin():
   1442       platform_ref_dict = ref_dict['win']
   1443     elif self.IsMac():
   1444       platform_ref_dict = ref_dict['mac']
   1445     elif self.IsLinux():
   1446       platform_ref_dict = ref_dict['linux']
   1447     else:
   1448       self.fail(msg='This platform is unsupported.')
   1449     std_fps = platform_ref_dict[description]
   1450     # Compare the average FPS to the reference data.  The average may fall
   1451     # up to 10% below the reference FPS value before the demo is considered
   1452     # to have failed.
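            # Example (illustrative numbers): with a reference of 50 fps the
            # threshold is 0.9 * 50 = 45 fps, so an average of 45 fps passes
            # while 44.9 fps fails.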
   1453     if avg_fps < (0.9 * std_fps):
   1454       logging.info('FPS difference exceeds threshold for: %s', description)
   1455       logging.info('  Average: %f fps', avg_fps)
   1456       logging.info('  Reference Average: %f fps', std_fps)
   1457       status = False
   1458     else:
   1459       logging.info('Average FPS is at least 90 percent of the reference '
   1460                    'FPS for: %s', description)
   1461       logging.info('  Average: %f fps', avg_fps)
   1462       logging.info('  Reference Average: %f fps', std_fps)
   1463     return status
   1464 
   1465   def testLaunchDemosParallelInSeparateTabs(self):
   1466     """Measures performance of demos running in separate tabs of one browser."""
   1467     # Launch all the demos in parallel, each in a separate tab.
   1468     counter = 0
   1469     all_demos_passed = True
   1470     ref_dict = self._gpu_info_dict['separate_tab_ref_data']
   1471     # Iterate through the dictionary and append each demo URL as a new tab.
   1472     for url in self._demo_name_url_dict.iterkeys():
   1473       self.assertTrue(
   1474           self.AppendTab(pyauto.GURL(self._demo_name_url_dict[url])),
   1475           msg='Failed to append tab for %s.' % url)
   1476       counter += 1
   1477       # Assert that the tab count equals the number of appended tabs plus one.
   1478       self.assertEqual(self.GetTabCount(), counter + 1)
   1479     # Measure the performance of each demo and compare it against the
   1480     # golden reference.
   1481     for url in self._demo_name_url_dict.iterkeys():
   1482       avg_fps = self._MeasureFpsOverTime(tab_index=counter)
   1483       # Get the reference FPS value and compare the results.
   1484       if not self._GetStdAvgAndCompare(avg_fps, url, ref_dict):
   1485         all_demos_passed = False
   1486       counter -= 1
   1487     self.assertTrue(
   1488         all_demos_passed,
   1489         msg='One or more demos failed to yield an acceptable FPS value')
   1490 
   1491   def testLaunchDemosInSeparateBrowser(self):
   1492     """Measures performance by launching each demo in a fresh browser session."""
   1493     # Launch demos in the browser
   1494     ref_dict = self._gpu_info_dict['separate_browser_ref_data']
   1495     all_demos_passed = True
   1496     for url in self._demo_name_url_dict.iterkeys():
   1497       self.NavigateToURL(self._demo_name_url_dict[url])
   1498       # Measures performance using different demos.
   1499       avg_fps = self._MeasureFpsOverTime()
   1500       self.RestartBrowser()
   1501       # Get the reference FPS value and compare the results.
   1502       if not self._GetStdAvgAndCompare(avg_fps, url, ref_dict):
   1503         all_demos_passed = False
   1504     self.assertTrue(
   1505         all_demos_passed,
   1506         msg='One or more demos failed to yield an acceptable FPS value')
   1507 
   1508   def testLaunchDemosBrowseForwardBackward(self):
   1509     """Measures demo performance while navigating back and forward in browser."""
   1510     ref_dict = self._gpu_info_dict['browse_back_forward_ref_data']
   1511     url_array = []
   1512     desc_array = []
   1513     all_demos_passed = True
   1514     # Split the dictionary into parallel lists of URLs and descriptions.
   1515     for url in self._demo_name_url_dict.iterkeys():
   1516       url_array.append(self._demo_name_url_dict[url])
   1517       desc_array.append(url)
   1518     for index in range(len(url_array) - 1):
   1519       # Launch the first demo in the browser.
   1520       if index == 0:
   1521         self.NavigateToURL(url_array[index])
   1522         # Measures performance using the first demo.
   1523         avg_fps = self._MeasureFpsOverTime()
   1524         status1 = self._GetStdAvgAndCompare(avg_fps, desc_array[index],
   1525                                             ref_dict)
   1526       # Measures performance using the second demo.
   1527       self.NavigateToURL(url_array[index + 1])
   1528       avg_fps = self._MeasureFpsOverTime()
   1529       status2 = self._GetStdAvgAndCompare(avg_fps, desc_array[index + 1],
   1530                                           ref_dict)
   1531       # Go back to the previous demo.
   1532       self.TabGoBack()
   1533       # Measure performance for the first demo after navigating back.
   1534       avg_fps = self._MeasureFpsOverTime()
   1535       status3 = self._GetStdAvgAndCompare(
   1536           avg_fps, desc_array[index] + '_backward',
   1537           ref_dict)
   1538       # Go forward to the second demo again.
   1539       self.TabGoForward()
   1540       # Measure performance for the second demo after navigating forward.
   1541       avg_fps = self._MeasureFpsOverTime()
   1542       status4 = self._GetStdAvgAndCompare(
   1543           avg_fps, desc_array[index + 1] + '_forward',
   1544           ref_dict)
   1545       if not all([status1, status2, status3, status4]):
   1546         all_demos_passed = False
   1547     self.assertTrue(
   1548         all_demos_passed,
   1549         msg='One or more demos failed to yield an acceptable FPS value')
   1550 
   1551 
   1552 class HTML5BenchmarkTest(BasePerfTest):
   1553   """Tests for HTML5 performance."""
   1554 
   1555   def testHTML5Benchmark(self):
   1556     """Measures performance using the benchmark at html5-benchmark.com."""
   1557     self.NavigateToURL('http://html5-benchmark.com')
   1558 
   1559     start_benchmark_js = """
   1560       benchmark();
   1561       window.domAutomationController.send("done");
   1562     """
   1563     self.ExecuteJavascript(start_benchmark_js)
   1564 
   1565     js_final_score = """
   1566       var score = "-1";
   1567       var elem = document.getElementById("score");
   1568       if (elem)
   1569         score = elem.innerHTML;
   1570       window.domAutomationController.send(score);
   1571     """
   1572     # Wait for the benchmark to complete, which is assumed to be when the value
   1573     # of the 'score' DOM element changes to something other than '87485'.
   1574     self.assertTrue(
   1575         self.WaitUntil(
   1576             lambda: self.ExecuteJavascript(js_final_score) != '87485',
   1577             timeout=900, retry_sleep=1),
   1578         msg='Timed out when waiting for final score to be available.')
   1579 
   1580     score = self.ExecuteJavascript(js_final_score)
   1581     logging.info('HTML5 Benchmark final score: %f', float(score))
   1582     self._OutputPerfGraphValue('HTML5Benchmark', float(score), 'score',
   1583                                'html5_benchmark')
   1584 
   1585 
   1586 class FileUploadDownloadTest(BasePerfTest):
   1587   """Tests that involve measuring performance of upload and download."""
   1588 
   1589   def setUp(self):
   1590     """Performs necessary setup work before running each test in this class."""
   1591     self._temp_dir = tempfile.mkdtemp()
   1592     self._test_server = PerfTestServer(self._temp_dir)
   1593     self._test_server_port = self._test_server.GetPort()
   1594     self._test_server.Run()
   1595     self.assertTrue(self.WaitUntil(self._IsTestServerRunning),
   1596                     msg='Failed to start local performance test server.')
   1597     BasePerfTest.setUp(self)
   1598 
   1599   def tearDown(self):
   1600     """Performs necessary cleanup work after running each test in this class."""
   1601     BasePerfTest.tearDown(self)
   1602     self._test_server.ShutDown()
   1603     pyauto_utils.RemovePath(self._temp_dir)
   1604 
   1605   def _IsTestServerRunning(self):
   1606     """Determines whether the local test server is ready to accept connections.
   1607 
   1608     Returns:
   1609       True, if a connection can be made to the local performance test server, or
   1610       False otherwise.
   1611     """
   1612     conn = None
   1613     try:
   1614       conn = urllib2.urlopen('http://localhost:%d' % self._test_server_port)
   1615       return True
   1616     except IOError:
   1617       return False
   1618     finally:
   1619       if conn:
   1620         conn.close()
   1621 
   1622   def testDownload100MBFile(self):
   1623     """Measures the time to download a 100 MB file from a local server."""
   1624     CREATE_100MB_URL = (
   1625         'http://localhost:%d/create_file_of_size?filename=data&mb=100' %
   1626         self._test_server_port)
   1627     DOWNLOAD_100MB_URL = 'http://localhost:%d/data' % self._test_server_port
   1628     DELETE_100MB_URL = ('http://localhost:%d/delete_file?filename=data' %
   1629                         self._test_server_port)
   1630 
   1631     # Tell the local server to create a 100 MB file.
   1632     self.NavigateToURL(CREATE_100MB_URL)
   1633 
   1634     # Cleaning up downloaded files is done in the same way as in downloads.py.
   1635     # We first identify all existing downloaded files, then remove only those
   1636     # new downloaded files that appear during the course of this test.
   1637     download_dir = self.GetDownloadDirectory().value()
   1638     orig_downloads = []
   1639     if os.path.isdir(download_dir):
   1640       orig_downloads = os.listdir(download_dir)
   1641 
   1642     def _CleanupAdditionalFilesInDir(directory, orig_files):
   1643       """Removes the additional files in the specified directory.
   1644 
   1645       This function will remove all files from |directory| that are not
   1646       specified in |orig_files|.
   1647 
   1648       Args:
   1649         directory: A string directory path.
   1650         orig_files: A list of strings representing the original set of files in
   1651             the specified directory.
   1652       """
   1653       downloads_to_remove = []
   1654       if os.path.isdir(directory):
   1655         downloads_to_remove = [os.path.join(directory, name)
   1656                                for name in os.listdir(directory)
   1657                                if name not in orig_files]
   1658       for file_name in downloads_to_remove:
   1659         pyauto_utils.RemovePath(file_name)
   1660 
   1661     def _DownloadFile(url):
   1662       self.DownloadAndWaitForStart(url)
   1663       self.WaitForAllDownloadsToComplete(timeout=2 * 60 * 1000)  # 2 minutes.
   1664 
   1665     timings = []
   1666     for iteration in range(self._num_iterations + 1):
   1667       elapsed_time = self._MeasureElapsedTime(
   1668           lambda: _DownloadFile(DOWNLOAD_100MB_URL), num_invocations=1)
   1669       # Ignore the first iteration.
   1670       if iteration:
   1671         timings.append(elapsed_time)
   1672         logging.info('Iteration %d of %d: %f milliseconds', iteration,
   1673                      self._num_iterations, elapsed_time)
   1674       self.SetDownloadShelfVisible(False)
   1675       _CleanupAdditionalFilesInDir(download_dir, orig_downloads)
   1676 
   1677     self._PrintSummaryResults('Download100MBFile', timings, 'milliseconds',
   1678                               'download_file')
   1679 
   1680     # Tell the local server to delete the 100 MB file.
   1681     self.NavigateToURL(DELETE_100MB_URL)
   1682 
   1683   def testUpload50MBFile(self):
   1684     """Measures the time to upload a 50 MB file to a local server."""
   1685     # TODO(dennisjeffrey): Replace the use of XMLHttpRequest in this test with
   1686     # FileManager automation to select the upload file when crosbug.com/17903
   1687     # is complete.
   1688     START_UPLOAD_URL = (
   1689         'http://localhost:%d/start_upload?mb=50' % self._test_server_port)
   1690 
   1691     EXPECTED_SUBSTRING = 'Upload complete'
   1692 
   1693     def _IsUploadComplete():
   1694       js = """
   1695           result = "";
   1696           var div = document.getElementById("upload_result");
   1697           if (div)
   1698             result = div.innerHTML;
   1699           window.domAutomationController.send(result);
   1700       """
   1701       return self.ExecuteJavascript(js).find(EXPECTED_SUBSTRING) >= 0
   1702 
   1703     def _RunSingleUpload():
   1704       self.NavigateToURL(START_UPLOAD_URL)
   1705       self.assertTrue(
   1706           self.WaitUntil(_IsUploadComplete, timeout=120, expect_retval=True,
   1707                          retry_sleep=0.10),
   1708           msg='Upload failed to complete before the timeout was hit.')
   1709 
   1710     timings = []
   1711     for iteration in range(self._num_iterations + 1):
   1712       elapsed_time = self._MeasureElapsedTime(_RunSingleUpload)
   1713       # Ignore the first iteration.
   1714       if iteration:
   1715         timings.append(elapsed_time)
   1716         logging.info('Iteration %d of %d: %f milliseconds', iteration,
   1717                      self._num_iterations, elapsed_time)
   1718 
   1719     self._PrintSummaryResults('Upload50MBFile', timings, 'milliseconds',
   1720                               'upload_file')
   1721 
   1722 
   1723 class ScrollResults(object):
   1724   """Container for ScrollTest results."""
   1725 
   1726   def __init__(self, first_paint_seconds, results_list):
   1727     assert len(results_list) == 2, 'Expecting initial and repeat results.'
   1728     self._first_paint_time = 1000.0 * first_paint_seconds
   1729     self._results_list = results_list
   1730 
   1731   def GetFirstPaintTime(self):
   1732     return self._first_paint_time
   1733 
   1734   def GetFrameCount(self, index):
   1735     results = self._results_list[index]
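            # Prefer 'numFramesSentToScreen' when the scroll test reports it;
            # otherwise fall back to 'numAnimationFrames'.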
   1736     return results.get('numFramesSentToScreen', results['numAnimationFrames'])
   1737 
   1738   def GetFps(self, index):
   1739     return (self.GetFrameCount(index) /
   1740             self._results_list[index]['totalTimeInSeconds'])
   1741 
   1742   def GetMeanFrameTime(self, index):
   1743     return (self._results_list[index]['totalTimeInSeconds'] /
   1744             self.GetFrameCount(index))
   1745 
   1746   def GetPercentBelow60Fps(self, index):
   1747     return (float(self._results_list[index]['droppedFrameCount']) /
   1748             self.GetFrameCount(index))
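          # Note: GetFps() and GetMeanFrameTime() are reciprocal views of the
          # same data; e.g. 300 frames over 5.0 seconds give 60.0 fps and a
          # mean frame time of 5.0 / 300 ~= 0.0167 seconds (illustrative).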
   1749 
   1750 
   1751 class BaseScrollTest(BasePerfTest):
   1752   """Base class for tests measuring scrolling performance."""
   1753 
   1754   def setUp(self):
   1755     """Performs necessary setup work before running each test."""
   1756     super(BaseScrollTest, self).setUp()
   1757     scroll_file = os.path.join(self.DataDir(), 'scroll', 'scroll.js')
   1758     with open(scroll_file) as f:
   1759       self._scroll_text = f.read()
   1760 
   1761   def ExtraChromeFlags(self):
   1762     """Ensures Chrome is launched with custom flags.
   1763 
   1764     Returns:
   1765       A list of extra flags to pass to Chrome when it is launched.
   1766     """
   1767     # Extra flag used by scroll performance tests.
   1768     return (super(BaseScrollTest, self).ExtraChromeFlags() +
   1769             ['--enable-gpu-benchmarking'])
   1770 
   1771   def RunSingleInvocation(self, url, is_gmail_test=False):
   1772     """Runs a single invocation of the scroll test.
   1773 
   1774     Args:
   1775       url: The string url for the webpage on which to run the scroll test.
   1776       is_gmail_test: True iff the test is a GMail test.
   1777 
   1778     Returns:
   1779       Instance of ScrollResults.
   1780     """
   1781 
   1782     self.assertTrue(self.AppendTab(pyauto.GURL(url)),
   1783                     msg='Failed to append tab for webpage.')
   1784 
   1785     timeout = pyauto.PyUITest.ActionTimeoutChanger(self, 300 * 1000)  # ms
   1786     test_js = """%s;
   1787         new __ScrollTest(function(results) {
   1788           var stringify = JSON.stringify || JSON.encode;
   1789           window.domAutomationController.send(stringify(results));
   1790         }, %s);
   1791     """ % (self._scroll_text, 'true' if is_gmail_test else 'false')
   1792     results = simplejson.loads(self.ExecuteJavascript(test_js, tab_index=1))
   1793 
   1794     first_paint_js = ('window.domAutomationController.send('
   1795                       '(chrome.loadTimes().firstPaintTime - '
   1796                       'chrome.loadTimes().requestTime).toString());')
   1797     first_paint_time = float(self.ExecuteJavascript(first_paint_js,
   1798                                                     tab_index=1))
   1799 
   1800     self.CloseTab(tab_index=1)
   1801 
   1802     return ScrollResults(first_paint_time, results)
   1803 
   1804   def RunScrollTest(self, url, description, graph_name, is_gmail_test=False):
   1805     """Runs a scroll performance test on the specified webpage.
   1806 
   1807     Args:
   1808       url: The string url for the webpage on which to run the scroll test.
   1809       description: A string description for the particular test being run.
   1810       graph_name: A string name for the performance graph associated with this
   1811           test.  Only used on Chrome desktop.
   1812       is_gmail_test: True iff the test is a GMail test.
   1813     """
   1814     results = []
   1815     for iteration in range(self._num_iterations + 1):
   1816       result = self.RunSingleInvocation(url, is_gmail_test)
   1817       # Ignore the first iteration.
   1818       if iteration:
   1819         fps = result.GetFps(1)
   1820         assert fps, '%s did not scroll' % url
   1821         logging.info('Iteration %d of %d: %f fps', iteration,
   1822                      self._num_iterations, fps)
   1823         results.append(result)
   1824     self._PrintSummaryResults(
   1825         description, [r.GetFps(1) for r in results],
   1826         'FPS', graph_name)
   1827 
   1828 
   1829 class PopularSitesScrollTest(BaseScrollTest):
   1830   """Measures scrolling performance on recorded versions of popular sites."""
   1831 
   1832   def ExtraChromeFlags(self):
   1833     """Ensures Chrome is launched with custom flags.
   1834 
   1835     Returns:
   1836       A list of extra flags to pass to Chrome when it is launched.
   1837     """
   1838     return super(PopularSitesScrollTest,
   1839                  self).ExtraChromeFlags() + PageCyclerReplay.CHROME_FLAGS
   1840 
   1841   def _GetUrlList(self, test_name):
   1842     """Returns list of recorded sites."""
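            # pageSets is expected to be a list of URL lists, e.g. (illustrative)
            # [['http://a.example/'], ['http://b.example/', 'http://c.example/']];
            # it is flattened below and the first URL is skipped.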
   1843     sites_path = PageCyclerReplay.Path('page_sets', test_name=test_name)
   1844     with open(sites_path) as f:
   1845       sites_text = f.read()
   1846     js = """
   1847       %s
   1848       window.domAutomationController.send(JSON.stringify(pageSets));
   1849     """ % sites_text
   1850     page_sets = eval(self.ExecuteJavascript(js))
   1851     return list(itertools.chain(*page_sets))[1:]  # Skip first.
   1852 
   1853   def _PrintScrollResults(self, results):
   1854     self._PrintSummaryResults(
   1855         'initial', [r.GetMeanFrameTime(0) for r in results],
   1856         'ms', 'FrameTimes')
   1857     self._PrintSummaryResults(
   1858         'repeat', [r.GetMeanFrameTime(1) for r in results],
   1859         'ms', 'FrameTimes')
   1860     self._PrintSummaryResults(
   1861         'initial',
   1862         [r.GetPercentBelow60Fps(0) for r in results],
   1863         'percent', 'PercentBelow60FPS')
   1864     self._PrintSummaryResults(
   1865         'repeat',
   1866         [r.GetPercentBelow60Fps(1) for r in results],
   1867         'percent', 'PercentBelow60FPS')
   1868     self._PrintSummaryResults(
   1869         'first_paint_time', [r.GetFirstPaintTime() for r in results],
   1870         'ms', 'FirstPaintTime')
   1871 
   1872   def test2012Q3(self):
   1873     test_name = '2012Q3'
   1874     urls = self._GetUrlList(test_name)
   1875     results = []
   1876     with PageCyclerReplay.ReplayServer(test_name) as replay_server:
   1877       if replay_server.is_record_mode:
   1878         self._num_iterations = 1
   1879       for iteration in range(self._num_iterations):
   1880         for url in urls:
   1881           result = self.RunSingleInvocation(url)
   1882           fps = result.GetFps(0)
   1883           assert fps, '%s did not scroll' % url
   1884           logging.info('Iteration %d of %d: %f fps', iteration + 1,
   1885                        self._num_iterations, fps)
   1886           results.append(result)
   1887     self._PrintScrollResults(results)
   1888 
   1889 
   1890 class ScrollTest(BaseScrollTest):
   1891   """Tests to measure scrolling performance."""
   1892 
   1893   def ExtraChromeFlags(self):
   1894     """Ensures Chrome is launched with custom flags.
   1895 
   1896     Returns:
   1897       A list of extra flags to pass to Chrome when it is launched.
   1898     """
   1899     # Extra flag needed by scroll performance tests.
   1900     return super(ScrollTest, self).ExtraChromeFlags() + ['--disable-gpu-vsync']
   1901 
   1902   def testBlankPageScroll(self):
   1903     """Runs the scroll test on a blank page."""
   1904     self.RunScrollTest(
   1905         self.GetFileURLForDataPath('scroll', 'blank.html'), 'ScrollBlankPage',
   1906         'scroll_fps')
   1907 
   1908   def testTextScroll(self):
   1909     """Runs the scroll test on a text-filled page."""
   1910     self.RunScrollTest(
   1911         self.GetFileURLForDataPath('scroll', 'text.html'), 'ScrollTextPage',
   1912         'scroll_fps')
   1913 
   1914   def testGooglePlusScroll(self):
   1915     """Runs the scroll test on a Google Plus anonymized page."""
   1916     self.RunScrollTest(
   1917         self.GetFileURLForDataPath('scroll', 'plus.html'),
   1918         'ScrollGooglePlusPage', 'scroll_fps')
   1919 
   1920   def testGmailScroll(self):
   1921     """Runs the scroll test using the live Gmail site."""
   1922     self._LoginToGoogleAccount(account_key='test_google_account_gmail')
   1923     self.RunScrollTest('http://www.gmail.com', 'ScrollGmail',
   1924                        'scroll_fps', True)
   1925 
   1926 
   1927 class FlashTest(BasePerfTest):
   1928   """Tests to measure flash performance."""
   1929 
   1930   def _RunFlashTestForAverageFPS(self, webpage_url, description, graph_name):
   1931     """Runs a single flash test that measures an average FPS value.
   1932 
   1933     Args:
   1934       webpage_url: The string URL to a webpage that will run the test.
   1935       description: A string description for this test.
   1936       graph_name: A string name for the performance graph associated with this
   1937           test.  Only used on Chrome desktop.
   1938     """
   1939     # Open up the test webpage; it's assumed the test will start automatically.
   1940     self.assertTrue(self.AppendTab(pyauto.GURL(webpage_url)),
   1941                     msg='Failed to append tab for webpage.')
   1942 
   1943     # Wait until the final result is computed, then retrieve and output it.
   1944     js = """
   1945         window.domAutomationController.send(
   1946             JSON.stringify(final_average_fps));
   1947     """
   1948     self.assertTrue(
   1949         self.WaitUntil(
   1950             lambda: self.ExecuteJavascript(js, tab_index=1) != '-1',
   1951             timeout=300, expect_retval=True, retry_sleep=1),
   1952         msg='Timed out when waiting for test result.')
   1953     result = float(self.ExecuteJavascript(js, tab_index=1))
   1954     logging.info('Result for %s: %f FPS (average)', description, result)
   1955     self._OutputPerfGraphValue(description, result, 'FPS', graph_name)
   1956 
   1957   def testFlashGaming(self):
   1958     """Runs a simple flash gaming benchmark test."""
   1959     webpage_url = self.GetHttpURLForDataPath('pyauto_private', 'flash',
   1960                                              'FlashGamingTest2.html')
   1961     self._RunFlashTestForAverageFPS(webpage_url, 'FlashGaming', 'flash_fps')
   1962 
   1963   def testFlashText(self):
   1964     """Runs a simple flash text benchmark test."""
   1965     webpage_url = self.GetHttpURLForDataPath('pyauto_private', 'flash',
   1966                                              'FlashTextTest2.html')
   1967     self._RunFlashTestForAverageFPS(webpage_url, 'FlashText', 'flash_fps')
   1968 
   1969   def testScimarkGui(self):
   1970     """Runs the ScimarkGui benchmark tests."""
   1971     webpage_url = self.GetHttpURLForDataPath('pyauto_private', 'flash',
   1972                                              'scimarkGui.html')
   1973     self.assertTrue(self.AppendTab(pyauto.GURL(webpage_url)),
   1974                     msg='Failed to append tab for webpage.')
   1975 
   1976     js = 'window.domAutomationController.send(JSON.stringify(tests_done));'
   1977     self.assertTrue(
   1978         self.WaitUntil(
   1979             lambda: self.ExecuteJavascript(js, tab_index=1), timeout=300,
   1980             expect_retval='true', retry_sleep=1),
   1981         msg='Timed out when waiting for tests to complete.')
   1982 
   1983     js_result = """
   1984         var result = {};
   1985         for (var i = 0; i < tests_results.length; ++i) {
   1986           var test_name = tests_results[i][0];
   1987           var mflops = tests_results[i][1];
   1988           var mem = tests_results[i][2];
   1989           result[test_name] = [mflops, mem]
   1990         }
   1991         window.domAutomationController.send(JSON.stringify(result));
   1992     """
   1993     result = eval(self.ExecuteJavascript(js_result, tab_index=1))
   1994     for benchmark in result:
   1995       mflops = float(result[benchmark][0])
   1996       mem = float(result[benchmark][1])
   1997       if benchmark.endswith('_mflops'):
   1998         benchmark = benchmark[:benchmark.find('_mflops')]
   1999       logging.info('Results for ScimarkGui_%s:', benchmark)
   2000       logging.info('  %f MFLOPS', mflops)
   2001       logging.info('  %f MB', mem)
   2002       self._OutputPerfGraphValue('ScimarkGui-%s-MFLOPS' % benchmark, mflops,
   2003                                  'MFLOPS', 'scimark_gui_mflops')
   2004       self._OutputPerfGraphValue('ScimarkGui-%s-Mem' % benchmark, mem, 'MB',
   2005                                  'scimark_gui_mem')
   2006 
   2007 
   2008 class LiveGamePerfTest(BasePerfTest):
   2009   """Tests to measure performance of live gaming webapps."""
   2010 
   2011   def _RunLiveGamePerfTest(self, url, url_title_substring,
   2012                            description, graph_name):
   2013     """Measures performance metrics for the specified live gaming webapp.
   2014 
   2015     This function connects to the specified URL to launch the gaming webapp,
   2016     waits for a period of time for the webapp to run, then collects some
   2017     performance metrics about the running webapp.
   2018 
   2019     Args:
   2020       url: The string URL of the gaming webapp to analyze.
   2021       url_title_substring: A string that is expected to be a substring of the
   2022           webpage title for the specified gaming webapp.  Used to verify that
   2023           the webapp loads correctly.
   2024       description: A string description for this game, used in the performance
   2025           value description.  Should not contain any spaces.
   2026       graph_name: A string name for the performance graph associated with this
   2027           test.  Only used on Chrome desktop.
   2028     """
   2029     self.NavigateToURL(url)
   2030     loaded_tab_title = self.GetActiveTabTitle()
   2031     self.assertTrue(url_title_substring in loaded_tab_title,
   2032                     msg='Loaded tab title missing "%s": "%s"' %
   2033                         (url_title_substring, loaded_tab_title))
   2034     cpu_usage_start = self._GetCPUUsage()
   2035 
   2036     # Let the app run for 1 minute.
   2037     time.sleep(60)
   2038 
   2039     cpu_usage_end = self._GetCPUUsage()
   2040     fraction_non_idle_time = self._GetFractionNonIdleCPUTime(
   2041         cpu_usage_start, cpu_usage_end)
   2042 
   2043     logging.info('Fraction of CPU time spent non-idle: %f',
   2044                  fraction_non_idle_time)
   2045     self._OutputPerfGraphValue(description + 'CpuBusy', fraction_non_idle_time,
   2046                                'Fraction', graph_name + '_cpu_busy')
   2047     v8_heap_stats = self.GetV8HeapStats()
   2048     v8_heap_size = v8_heap_stats['v8_memory_used'] / (1024.0 * 1024.0)
   2049     logging.info('Total v8 heap size: %f MB', v8_heap_size)
   2050     self._OutputPerfGraphValue(description + 'V8HeapSize', v8_heap_size, 'MB',
   2051                                graph_name + '_v8_heap_size')
   2052 
   2053   def testAngryBirds(self):
   2054     """Measures performance for Angry Birds."""
   2055     self._RunLiveGamePerfTest('http://chrome.angrybirds.com', 'Angry Birds',
   2056                               'AngryBirds', 'angry_birds')
   2057 
   2058 
   2059 class BasePageCyclerTest(BasePerfTest):
   2060   """Base class for page cycler tests.
   2061 
   2062   Derived classes must implement StartUrl().
   2063 
   2064   Environment Variables:
   2065     PC_NO_AUTO: if set, avoids automatically loading pages.
   2066   """
   2067   MAX_ITERATION_SECONDS = 60
   2068   TRIM_PERCENT = 20
   2069   DEFAULT_USE_AUTO = True
   2070 
   2071   # Page Cycler lives in src/data/page_cycler rather than src/chrome/test/data
   2072   DATA_PATH = os.path.abspath(
   2073       os.path.join(BasePerfTest.DataDir(), os.pardir, os.pardir,
   2074                    os.pardir, 'data', 'page_cycler'))
   2075 
   2076   def setUp(self):
   2077     """Performs necessary setup work before running each test."""
   2078     super(BasePageCyclerTest, self).setUp()
   2079     self.use_auto = 'PC_NO_AUTO' not in os.environ
   2080 
   2081   @classmethod
   2082   def DataPath(cls, subdir):
   2083     return os.path.join(cls.DATA_PATH, subdir)
   2084 
   2085   def ExtraChromeFlags(self):
   2086     """Ensures Chrome is launched with custom flags.
   2087 
   2088     Returns:
   2089       A list of extra flags to pass to Chrome when it is launched.
   2090     """
   2091     # Extra flags required to run these tests.
   2092     # The first two are needed for the test.
   2093     # The plugins argument is to prevent bad scores due to pop-ups from
   2094     # running an old version of something (like Flash).
   2095     return (super(BasePageCyclerTest, self).ExtraChromeFlags() +
   2096             ['--js-flags="--expose_gc"',
   2097              '--enable-file-cookies',
   2098              '--allow-outdated-plugins'])
   2099 
   2100   def WaitUntilStarted(self, start_url):
   2101     """Check that the test navigates away from the start_url."""
   2102     js_is_started = """
   2103         var is_started = document.location.href !== "%s";
   2104         window.domAutomationController.send(JSON.stringify(is_started));
   2105     """ % start_url
   2106     self.assertTrue(
   2107         self.WaitUntil(lambda: self.ExecuteJavascript(js_is_started) == 'true',
   2108                        timeout=10),
   2109         msg='Timed out when waiting to leave start page.')
   2110 
   2111   def WaitUntilDone(self, url, iterations):
   2112     """Check cookies for "__pc_done=1" to know the test is over."""
   2113     def IsDone():
   2114       cookies = self.GetCookie(pyauto.GURL(url))  # window 0, tab 0
   2115       return '__pc_done=1' in cookies
   2116     self.assertTrue(
   2117         self.WaitUntil(
   2118             IsDone,
   2119             timeout=(self.MAX_ITERATION_SECONDS * iterations),
   2120             retry_sleep=1),
   2121         msg='Timed out waiting for page cycler test to complete.')
   2122 
   2123   def CollectPagesAndTimes(self, url):
   2124     """Collect the results from the cookies."""
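            # The page cycler harness reports results via cookies shaped roughly
            # like (illustrative):
            #   __pc_pages=siteA,siteB;__pc_timings=12.3,45.6,11.9,44.2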
   2125     pages, times = None, None
   2126     cookies = self.GetCookie(pyauto.GURL(url))  # window 0, tab 0
   2127     for cookie in cookies.split(';'):
   2128       if '__pc_pages' in cookie:
   2129         pages_str = cookie.split('=', 1)[1]
   2130         pages = pages_str.split(',')
   2131       elif '__pc_timings' in cookie:
   2132         times_str = cookie.split('=', 1)[1]
   2133         times = [float(t) for t in times_str.split(',')]
   2134     self.assertTrue(pages and times,
   2135                     msg='Unable to find test results in cookies: %s' % cookies)
   2136     return pages, times
   2137 
   2138   def IteratePageTimes(self, pages, times, iterations):
   2139     """Regroup the times by the page.
   2140 
   2141     Args:
   2142       pages: the list of pages
   2143       times: e.g. [page1_iter1, page2_iter1, ..., page1_iter2, page2_iter2, ...]
   2144       iterations: the number of times for each page
   2145     Yields:
   2146       (pageN, [pageN_iter1, pageN_iter2, ...])
   2147     """
   2148     num_pages = len(pages)
   2149     num_times = len(times)
   2150     expected_num_times = num_pages * iterations
   2151     self.assertEqual(
   2152         expected_num_times, num_times,
   2153         msg=('num_times != num_pages * iterations: %s != %s * %s, times=%s' %
   2154              (num_times, num_pages, iterations, times)))
   2155     for i, page in enumerate(pages):
   2156       yield page, list(itertools.islice(times, i, None, num_pages))
   2157 
   2158   def CheckPageTimes(self, pages, times, iterations):
   2159     """Assert that all the times are greater than zero."""
   2160     failed_pages = []
   2161     for page, times in self.IteratePageTimes(pages, times, iterations):
   2162       failed_times = [t for t in times if t <= 0.0]
   2163       if failed_times:
   2164         failed_pages.append((page, failed_times))
   2165     if failed_pages:
   2166       self.fail('Pages with unexpected times: %s' % failed_pages)
   2167 
   2168   def TrimTimes(self, times, percent):
   2169     """Returns a new list with |percent|% of a page's times trimmed away.
   2170 
   2171     Removes the largest and smallest values.
   2172     """
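            # Example (illustrative numbers): times=[5, 1, 4, 2, 3] and
            # percent=20 give num_to_trim = int(5 * 0.2) = 1, a = 0, b = 4, so
            # the sorted list [1, 2, 3, 4, 5] is trimmed to [1, 2, 3, 4].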
   2173     iterations = len(times)
   2174     times = sorted(times)
   2175     num_to_trim = int(iterations * float(percent) / 100.0)
   2176     logging.debug('Before trimming %d: %s', num_to_trim, times)
   2177     a = num_to_trim / 2
   2178     b = iterations - (num_to_trim / 2 + num_to_trim % 2)
   2179     trimmed_times = times[a:b]
   2180     logging.debug('After trimming: %s', trimmed_times)
   2181     return trimmed_times
   2182 
   2183   def ComputeFinalResult(self, pages, times, iterations):
   2184     """Computes the final score: the geometric mean of the arithmetic
   2185     means of each page's load times.  The upper/lower 20% of the times
   2186     for each page are dropped first so that outliers don't skew the
   2187     mean.  The geometric mean is used for the final score because the
   2188     time range for any given site may be very different, and we don't
   2189     want slower sites to weigh more heavily than others.
   2190     """
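            # Example (illustrative numbers): per-page trimmed means of 100, 200
            # and 400 ms give a final score of (100 * 200 * 400) ** (1 / 3.0) =
            # 200 ms, so a single slow page cannot dominate the result.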
   2191     self.CheckPageTimes(pages, times, iterations)
   2192     page_means = [
   2193         Mean(self.TrimTimes(times, percent=self.TRIM_PERCENT))
   2194         for _, times in self.IteratePageTimes(pages, times, iterations)]
   2195     return GeometricMean(page_means)
   2196 
   2197   def StartUrl(self, test_name, iterations):
   2198     """Returns the URL used to start the test.
   2199 
   2200     Derived classes must implement this.
   2201     """
   2202     raise NotImplementedError
   2203 
   2204   def RunPageCyclerTest(self, name, description):
   2205     """Runs the specified PageCycler test.
   2206 
   2207     Args:
   2208       name: the page cycler test name (corresponds to a directory or test file)
   2209       description: a string description for the test
   2210     """
   2211     iterations = self._num_iterations
   2212     start_url = self.StartUrl(name, iterations)
   2213     self.NavigateToURL(start_url)
   2214     if self.use_auto:
   2215       self.WaitUntilStarted(start_url)
   2216     self.WaitUntilDone(start_url, iterations)
   2217     pages, times = self.CollectPagesAndTimes(start_url)
   2218     final_result = self.ComputeFinalResult(pages, times, iterations)
   2219     logging.info('%s page cycler final result: %f',
   2220                  description, final_result)
   2221     self._OutputPerfGraphValue(description + '_PageCycler', final_result,
   2222                                'milliseconds', graph_name='PageCycler')
   2223 
   2224 
   2225 class PageCyclerTest(BasePageCyclerTest):
   2226   """Tests to run various page cyclers.
   2227 
   2228   Environment Variables:
   2229     PC_NO_AUTO: if set, avoids automatically loading pages.
   2230   """
   2231 
   2232   def _PreReadDataDir(self, subdir):
   2233     """Recursively reads all of the files in a page cycler data directory.
   2234 
   2235     The intent is to get them into memory before they are used by the benchmark.
   2236 
   2237     Args:
   2238       subdir: a subdirectory of the page cycler data directory.
   2239     """
   2240     def _PreReadDir(dirname, names):
   2241       for rfile in names:
   2242         with open(os.path.join(dirname, rfile)) as fp:
   2243           fp.read()
   2244     for root, dirs, files in os.walk(self.DataPath(subdir)):
   2245       _PreReadDir(root, files)
   2246 
   2247   def StartUrl(self, test_name, iterations):
   2248     # Must invoke GetFileURLForPath before appending parameters to the URL,
   2249     # otherwise those parameters will get quoted.
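            # The resulting URL looks roughly like (path illustrative):
            #   file:///<data path>/morejs/start.html?iterations=10&auto=1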
   2250     start_url = self.GetFileURLForPath(self.DataPath(test_name), 'start.html')
   2251     start_url += '?iterations=%d' % iterations
   2252     if self.use_auto:
   2253       start_url += '&auto=1'
   2254     return start_url
   2255 
   2256   def RunPageCyclerTest(self, dirname, description):
   2257     """Runs the specified PageCycler test.
   2258 
   2259     Args:
   2260       dirname: directory containing the page cycler test
   2261       description: a string description for the test
   2262     """
   2263     self._PreReadDataDir('common')
   2264     self._PreReadDataDir(dirname)
   2265     super(PageCyclerTest, self).RunPageCyclerTest(dirname, description)
   2266 
   2267   def testMoreJSFile(self):
   2268     self.RunPageCyclerTest('morejs', 'MoreJSFile')
   2269 
   2270   def testAlexaFile(self):
   2271     self.RunPageCyclerTest('alexa_us', 'Alexa_usFile')
   2272 
   2273   def testBloatFile(self):
   2274     self.RunPageCyclerTest('bloat', 'BloatFile')
   2275 
   2276   def testDHTMLFile(self):
   2277     self.RunPageCyclerTest('dhtml', 'DhtmlFile')
   2278 
   2279   def testIntl1File(self):
   2280     self.RunPageCyclerTest('intl1', 'Intl1File')
   2281 
   2282   def testIntl2File(self):
   2283     self.RunPageCyclerTest('intl2', 'Intl2File')
   2284 
   2285   def testMozFile(self):
   2286     self.RunPageCyclerTest('moz', 'MozFile')
   2287 
   2288   def testMoz2File(self):
   2289     self.RunPageCyclerTest('moz2', 'Moz2File')
   2290 
   2291 
   2292 class PageCyclerReplay(object):
   2293   """Run page cycler tests with network simulation via Web Page Replay.
   2294 
   2295   Web Page Replay is a proxy that can record and "replay" web pages with
   2296   simulated network characteristics -- without having to edit the pages
   2297   by hand. With WPR, tests can use "real" web content, and catch
   2298   performance issues that may result from introducing network delays and
   2299   bandwidth throttling.
   2300   """
   2301   _PATHS = {
   2302       'archive':    'src/data/page_cycler/webpagereplay/{test_name}.wpr',
   2303       'page_sets':  'src/tools/page_cycler/webpagereplay/tests/{test_name}.js',
   2304       'start_page': 'src/tools/page_cycler/webpagereplay/start.html',
   2305       'extension':  'src/tools/page_cycler/webpagereplay/extension',
   2306       }
   2307 
   2308   WEBPAGEREPLAY_HOST = '127.0.0.1'
   2309   WEBPAGEREPLAY_HTTP_PORT = 8080
   2310   WEBPAGEREPLAY_HTTPS_PORT = 8413
   2311 
   2312   CHROME_FLAGS = webpagereplay.GetChromeFlags(
   2313       WEBPAGEREPLAY_HOST,
   2314       WEBPAGEREPLAY_HTTP_PORT,
   2315       WEBPAGEREPLAY_HTTPS_PORT) + [
   2316           '--log-level=0',
   2317           '--disable-background-networking',
   2318           '--enable-experimental-extension-apis',
   2319           '--enable-logging',
   2320           '--enable-benchmarking',
   2321           '--enable-net-benchmarking',
   2322           '--metrics-recording-only',
   2323           '--activate-on-launch',
   2324           '--no-first-run',
   2325           '--no-proxy-server',
   2326           ]
   2327 
   2328   @classmethod
   2329   def Path(cls, key, **kwargs):
   2330     return FormatChromePath(cls._PATHS[key], **kwargs)
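          # e.g. Path('archive', test_name='2012Q2') resolves to the absolute
          # path of src/data/page_cycler/webpagereplay/2012Q2.wpr in the tree.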
   2331 
   2332   @classmethod
   2333   def ReplayServer(cls, test_name, replay_options=None):
   2334     archive_path = cls.Path('archive', test_name=test_name)
   2335     return webpagereplay.ReplayServer(archive_path,
   2336                                       cls.WEBPAGEREPLAY_HOST,
   2337                                       cls.WEBPAGEREPLAY_HTTP_PORT,
   2338                                       cls.WEBPAGEREPLAY_HTTPS_PORT,
   2339                                       replay_options)
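          # Typical usage (see PageCyclerNetSimTest.RunPageCyclerTest below):
          #   with PageCyclerReplay.ReplayServer('2012Q2') as server:
          #     if server.is_record_mode:
          #       ...  # record with a single iteration instead of replaying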
   2340 
   2341 
   2342 class PageCyclerNetSimTest(BasePageCyclerTest):
   2343   """Tests to run Web Page Replay backed page cycler tests."""
   2344   MAX_ITERATION_SECONDS = 180
   2345 
   2346   def ExtraChromeFlags(self):
   2347     """Ensures Chrome is launched with custom flags.
   2348 
   2349     Returns:
   2350       A list of extra flags to pass to Chrome when it is launched.
   2351     """
   2352     flags = super(PageCyclerNetSimTest, self).ExtraChromeFlags()
   2353     flags.append('--load-extension=%s' % PageCyclerReplay.Path('extension'))
   2354     flags.extend(PageCyclerReplay.CHROME_FLAGS)
   2355     return flags
   2356 
   2357   def StartUrl(self, test_name, iterations):
   2358     start_path = PageCyclerReplay.Path('start_page')
   2359     start_url = 'file://%s?test=%s&iterations=%d' % (
   2360         start_path, test_name, iterations)
   2361     if self.use_auto:
   2362       start_url += '&auto=1'
   2363     return start_url

  def RunPageCyclerTest(self, test_name, description):
    """Runs the specified page cycler test under Web Page Replay.

    Args:
      test_name: The name shared by the archive (.wpr) and config (.js) files.
      description: A string description for the test.
    """
    replay_options = None
    with PageCyclerReplay.ReplayServer(test_name, replay_options) as server:
      if server.is_record_mode:
        # One pass over the page set is enough to record a fresh archive.
        self._num_iterations = 1
      super_self = super(PageCyclerNetSimTest, self)
      super_self.RunPageCyclerTest(test_name, description)

  def test2012Q2(self):
    self.RunPageCyclerTest('2012Q2', '2012Q2')


class MemoryTest(BasePerfTest):
  """Tests to measure memory consumption under different usage scenarios."""

  def ExtraChromeFlags(self):
    """Ensures Chrome is launched with custom flags.

    Returns:
      A list of extra flags to pass to Chrome when it is launched.
    """
    # Ensure Chrome assigns one renderer process to each tab.
    return super(MemoryTest, self).ExtraChromeFlags() + ['--process-per-tab']

  def _RecordMemoryStats(self, description, when, duration):
    """Outputs memory statistics to be graphed.

    Args:
      description: A string description for the test.  Should not contain
          spaces.  For example, 'MemCtrl'.
      when: A string describing when in the test these stats are recorded
          (stats may be captured at several "interesting" points during a
          single test run).  Should not contain spaces.
      duration: The number of seconds to sample data before outputting the
          memory statistics.
    """
    mem = self.GetMemoryStatsChromeOS(duration)
    measurement_types = [
      ('gem_obj', 'GemObj'),
      ('gtt', 'GTT'),
      ('mem_free', 'MemFree'),
      ('mem_available', 'MemAvail'),
      ('mem_shared', 'MemShare'),
      ('mem_cached', 'MemCache'),
      ('mem_anon', 'MemAnon'),
      ('mem_file', 'MemFile'),
      ('mem_slab', 'MemSlab'),
      ('browser_priv', 'BrowPriv'),
      ('browser_shared', 'BrowShar'),
      ('gpu_priv', 'GpuPriv'),
      ('gpu_shared', 'GpuShar'),
      ('renderer_priv', 'RendPriv'),
      ('renderer_shared', 'RendShar'),
    ]
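    # Each stat present in |mem| produces three graph values whose names
    # follow the pattern '<description>-<Min|Max|End><Type>-<when>', e.g.
    # 'MemCtrl-MinGemObj-0Tabs0' (see the format strings below).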
    for type_key, type_string in measurement_types:
      if type_key not in mem:
        continue
      self._OutputPerfGraphValue(
          '%s-Min%s-%s' % (description, type_string, when),
          mem[type_key]['min'], 'KB', '%s-%s' % (description, type_string))
      self._OutputPerfGraphValue(
          '%s-Max%s-%s' % (description, type_string, when),
          mem[type_key]['max'], 'KB', '%s-%s' % (description, type_string))
      self._OutputPerfGraphValue(
          '%s-End%s-%s' % (description, type_string, when),
          mem[type_key]['end'], 'KB', '%s-%s' % (description, type_string))

  def _RunTest(self, tabs, description, duration):
    """Runs a general memory test.

    Args:
      tabs: A list of strings representing the URLs of the websites to open
          during this test.
      description: A string description for the test.  Should not contain
          spaces.  For example, 'MemCtrl'.
      duration: The number of seconds to sample data before outputting memory
          statistics.
    """
    self._RecordMemoryStats(description, '0Tabs0', duration)

    for iteration_num in xrange(2):
      for site in tabs:
        self.AppendTab(pyauto.GURL(site))

      self._RecordMemoryStats(description,
                              '%dTabs%d' % (len(tabs), iteration_num + 1),
                              duration)

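      # Close each appended tab.  Tab 0 (the original tab) stays open, so the
      # tab at index 1 is always the oldest remaining appended tab.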
      for _ in xrange(len(tabs)):
        self.CloseTab(tab_index=1)

      self._RecordMemoryStats(description, '0Tabs%d' % (iteration_num + 1),
                              duration)

  def testOpenCloseTabsControl(self):
    """Measures memory usage when opening/closing tabs to about:blank."""
    tabs = ['about:blank'] * 10
    self._RunTest(tabs, 'MemCtrl', 15)

  def testOpenCloseTabsLiveSites(self):
    """Measures memory usage when opening/closing tabs to live sites."""
    tabs = [
      'http://www.google.com/gmail',
      'http://www.google.com/calendar',
      'http://www.google.com/plus',
      'http://www.google.com/youtube',
      'http://www.nytimes.com',
      'http://www.cnn.com',
      'http://www.facebook.com/zuck',
      'http://www.techcrunch.com',
      'http://www.theverge.com',
      'http://www.yahoo.com',
    ]
    # Log in to a test Google account to make connections to the above Google
    # websites more interesting.
    self._LoginToGoogleAccount()
    self._RunTest(tabs, 'MemLive', 20)


class PerfTestServerRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
  """Request handler for the local performance test server."""

  def _IgnoreHandler(self, unused_args):
    """A GET request handler that simply replies with status code 200.

    Args:
      unused_args: A dictionary of arguments for the current GET request.
          The arguments are ignored.
    """
    self.send_response(200)
    self.end_headers()

  def _CreateFileOfSizeHandler(self, args):
    """A GET handler that creates a local file with the specified size.

    Args:
      args: A dictionary of arguments for the current GET request.  Must
          contain 'filename' and 'mb' keys that refer to the name of the file
          to create and its desired size, respectively.
    """
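    # Example request (hypothetical filename):
    #   GET /create_file_of_size?filename=data.bin&mb=5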
    megabytes = None
    filename = None
    try:
      megabytes = int(args['mb'][0])
      filename = args['filename'][0]
    except (ValueError, KeyError, IndexError), e:
      logging.exception('Server error creating file: %s', e)
    assert megabytes and filename
    with open(os.path.join(self.server.docroot, filename), 'wb') as f:
      f.write('X' * 1024 * 1024 * megabytes)
    self.send_response(200)
    self.end_headers()

  def _DeleteFileHandler(self, args):
    """A GET handler that deletes the specified local file.

    Args:
      args: A dictionary of arguments for the current GET request.  Must
          contain a 'filename' key that refers to the name of the file to
          delete, relative to the server's document root.
    """
    filename = None
    try:
      filename = args['filename'][0]
    except (KeyError, IndexError), e:
      logging.exception('Server error deleting file: %s', e)
    assert filename
    try:
      os.remove(os.path.join(self.server.docroot, filename))
    except OSError, e:
      logging.warning('OS error removing file: %s', e)
    self.send_response(200)
    self.end_headers()

  def _StartUploadHandler(self, args):
    """A GET handler to serve a page that uploads the given amount of data.

    When the page loads, the specified amount of data is automatically
    uploaded to the same local server that is handling the current request.

    Args:
      args: A dictionary of arguments for the current GET request.  Must
          contain an 'mb' key that refers to the size of the data to upload.
    """
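    # Example: 'GET /start_upload?mb=5' serves a page whose onload handler
    # POSTs 5 MB of 'X' characters to the 'process_upload' handler below.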
    megabytes = None
    try:
      megabytes = int(args['mb'][0])
    except (ValueError, KeyError, IndexError), e:
      logging.exception('Server error starting upload: %s', e)
    assert megabytes
    script = """
        <html>
          <head>
            <script type='text/javascript'>
              function startUpload() {
                var megabytes = %s;
                var data = Array((1024 * 1024 * megabytes) + 1).join('X');
                var boundary = '***BOUNDARY***';
                var xhr = new XMLHttpRequest();

                xhr.open('POST', 'process_upload', true);
                xhr.setRequestHeader(
                    'Content-Type',
                    'multipart/form-data; boundary="' + boundary + '"');
                xhr.setRequestHeader('Content-Length', data.length);
                xhr.onreadystatechange = function() {
                  if (xhr.readyState == 4 && xhr.status == 200) {
                    document.getElementById('upload_result').innerHTML =
                        xhr.responseText;
                  }
                };
                var body = '--' + boundary + '\\r\\n';
                body += 'Content-Disposition: form-data;' +
                        'file_contents=' + data;
                xhr.send(body);
              }
            </script>
          </head>

          <body onload="startUpload();">
            <div id='upload_result'>Uploading...</div>
          </body>
        </html>
    """ % megabytes
    self.send_response(200)
    self.end_headers()
    self.wfile.write(script)

  def _ProcessUploadHandler(self, form):
    """A POST handler that discards uploaded data and sends a response.

    Args:
      form: A dictionary containing posted form data, as returned by
          urlparse.parse_qs().
    """
    upload_processed = False
    file_size = 0
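    # Note: Python 2's urlparse.parse_qs splits pairs on both '&' and ';',
    # so the ';file_contents=...' segment of the body built by
    # _StartUploadHandler parses out here under the 'file_contents' key.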
    if 'file_contents' in form:
      file_size = len(form['file_contents'][0])
      upload_processed = True
    self.send_response(200)
    self.end_headers()
    if upload_processed:
      self.wfile.write('Upload complete (%d bytes)' % file_size)
    else:
      self.wfile.write('No file contents uploaded')

  GET_REQUEST_HANDLERS = {
    'create_file_of_size': _CreateFileOfSizeHandler,
    'delete_file': _DeleteFileHandler,
    'start_upload': _StartUploadHandler,
    'favicon.ico': _IgnoreHandler,
  }

  POST_REQUEST_HANDLERS = {
    'process_upload': _ProcessUploadHandler,
  }

  def translate_path(self, path):
    """Ensures files are served from the given document root.

    Overridden from SimpleHTTPServer.SimpleHTTPRequestHandler.
    """
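    # The path is normalized and any remaining '.' or '..' components are
    # dropped, so e.g. '/../../etc/passwd' maps to <docroot>/etc/passwd
    # rather than escaping the document root.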
    path = urlparse.urlparse(path)[2]
    path = posixpath.normpath(urllib.unquote(path))
    words = path.split('/')
    words = filter(None, words)  # Remove empty strings from |words|.
    path = self.server.docroot
    for word in words:
      _, word = os.path.splitdrive(word)
      _, word = os.path.split(word)
      if word in (os.curdir, os.pardir):
        continue
      path = os.path.join(path, word)
    return path

  def do_GET(self):
    """Processes a GET request to the local server.

    Overridden from SimpleHTTPServer.SimpleHTTPRequestHandler.
    """
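    # Dispatch example: 'GET /delete_file?filename=foo' invokes
    # _DeleteFileHandler(self, {'filename': ['foo']}); any unrecognized path
    # falls through to the default file-serving behavior.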
    split_url = urlparse.urlsplit(self.path)
    base_path = split_url[2]
    if base_path.startswith('/'):
      base_path = base_path[1:]
    args = urlparse.parse_qs(split_url[3])
    if base_path in self.GET_REQUEST_HANDLERS:
      self.GET_REQUEST_HANDLERS[base_path](self, args)
    else:
      SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)

  def do_POST(self):
    """Processes a POST request to the local server.

    Overridden from SimpleHTTPServer.SimpleHTTPRequestHandler.
    """
    form = urlparse.parse_qs(
        self.rfile.read(int(self.headers.getheader('Content-Length'))))
    path = urlparse.urlparse(self.path)[2]
    if path.startswith('/'):
      path = path[1:]
    if path in self.POST_REQUEST_HANDLERS:
      self.POST_REQUEST_HANDLERS[path](self, form)
    else:
      self.send_response(200)
      self.send_header('Content-Type', 'text/plain')
      self.end_headers()
      self.wfile.write('No handler for POST request "%s".' % path)


class ThreadedHTTPServer(SocketServer.ThreadingMixIn,
                         BaseHTTPServer.HTTPServer):
  """An HTTP server that handles each request in a separate thread."""

  def __init__(self, server_address, handler_class):
    BaseHTTPServer.HTTPServer.__init__(self, server_address, handler_class)


class PerfTestServer(object):
  """Local server for use by performance tests."""

  def __init__(self, docroot):
    """Initializes the performance test server.

    Args:
      docroot: The directory from which to serve files.
    """
    # The use of 0 means to start the server on an arbitrary available port.
    self._server = ThreadedHTTPServer(('', 0),
                                      PerfTestServerRequestHandler)
    self._server.docroot = docroot
    self._server_thread = threading.Thread(target=self._server.serve_forever)

  def Run(self):
    """Starts the server thread."""
    self._server_thread.start()

  def ShutDown(self):
    """Shuts down the server."""
    self._server.shutdown()
    self._server_thread.join()

  def GetPort(self):
    """Identifies the port number to which the server is currently bound.

    Returns:
      The numeric port number to which the server is currently bound.
    """
    return self._server.server_address[1]

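# A minimal usage sketch for PerfTestServer (hypothetical docroot path):
#
#   server = PerfTestServer('/tmp/perf_docroot')
#   server.Run()
#   url = 'http://127.0.0.1:%d/start_upload?mb=5' % server.GetPort()
#   # ...point a browser at |url| to exercise the upload path...
#   server.ShutDown()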

if __name__ == '__main__':
  pyauto_functional.Main()