# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import sys

from metrics import Metric
from telemetry.value import histogram
from telemetry.value import histogram_util
from telemetry.value import scalar

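# Each entry below describes one Chrome histogram that MemoryMetric samples:
#   'name':         histogram name as recorded by Chrome.
#   'units':        units of the reported value ('kb' or 'percent').
#   'display_name': name under which the value appears in the results.
#   'type':         which process the histogram is read from
#                   (histogram_util.RENDERER_HISTOGRAM or BROWSER_HISTOGRAM).
#   'description':  optional human-readable description of the value.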
_HISTOGRAMS = [
    {
        'name': 'V8.MemoryExternalFragmentationTotal', 'units': 'percent',
        'display_name': 'V8_MemoryExternalFragmentationTotal',
        'type': histogram_util.RENDERER_HISTOGRAM,
        'description': 'Total external memory fragmentation after each GC in '
                       'percent.',
    },
    {
        'name': 'V8.MemoryHeapSampleTotalCommitted', 'units': 'kb',
        'display_name': 'V8_MemoryHeapSampleTotalCommitted',
        'type': histogram_util.RENDERER_HISTOGRAM,
        'description': 'The total size of committed memory used by V8 after '
                       'each GC in KB.'
    },
    {
        'name': 'V8.MemoryHeapSampleTotalUsed', 'units': 'kb',
        'display_name': 'V8_MemoryHeapSampleTotalUsed',
        'type': histogram_util.RENDERER_HISTOGRAM,
        'description': 'The total size of live memory used by V8 after each '
                       'GC in KB.',
    },
    {
        'name': 'V8.MemoryHeapSampleMaximumCommitted', 'units': 'kb',
        'display_name': 'V8_MemoryHeapSampleMaximumCommitted',
        'type': histogram_util.RENDERER_HISTOGRAM
    },
    {
        'name': 'Memory.RendererUsed', 'units': 'kb',
        'display_name': 'Memory_RendererUsed',
        'type': histogram_util.RENDERER_HISTOGRAM
    },
    {
        'name': 'Memory.BrowserUsed', 'units': 'kb',
        'display_name': 'Memory_BrowserUsed',
        'type': histogram_util.BROWSER_HISTOGRAM
    },
]

class MemoryMetric(Metric):
  """MemoryMetric gathers memory statistics from the browser object.

  This includes both per-page histogram stats, mostly about JavaScript memory
  usage, and overall memory stats from the system for the whole test run."""
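  # Rough usage sketch (illustrative only; `options`, `browser`, `page`, `tab`
  # and `results` are supplied by the Telemetry harness):
  #
  #   MemoryMetric.CustomizeBrowserOptions(options)  # before browser launch
  #   metric = MemoryMetric(browser)
  #   metric.Start(page, tab)
  #   ...  # interact with the page
  #   metric.Stop(page, tab)
  #   metric.AddResults(tab, results)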

  def __init__(self, browser):
    super(MemoryMetric, self).__init__()
    self._browser = browser
    start_memory_stats = self._browser.memory_stats
    self._start_commit_charge = None
    if 'SystemCommitCharge' in start_memory_stats:
      self._start_commit_charge = start_memory_stats['SystemCommitCharge']
    self._memory_stats = None
    self._histogram_start = dict()
    self._histogram_delta = dict()

  @classmethod
  def CustomizeBrowserOptions(cls, options):
    options.AppendExtraBrowserArgs([
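        # Needed so that histogram_util can read histogram data out of the
        # browser and renderer processes via JavaScript.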
        '--enable-stats-collection-bindings',
        # For a hard-coded set of Google pages (such as GMail), we produce
        # custom memory histograms (V8.Something_gmail) instead of the generic
        # histograms (V8.Something), if we detect that a renderer is only
        # rendering this page and no other pages. For this test, we need to
        # disable histogram customizing, so that we get the same generic
        # histograms produced for all pages.
        '--disable-histogram-customizer'
    ])

  def Start(self, page, tab):
    """Start the per-page preparation for this metric.

    Here, this consists of recording the start value of all the histograms.
    """
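    # Each value stored here is the raw histogram snapshot returned by
    # histogram_util.GetHistogram(); Stop() subtracts it from the end-of-page
    # snapshot to produce a per-page delta.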
    for h in _HISTOGRAMS:
      histogram_data = histogram_util.GetHistogram(
          h['type'], h['name'], tab)
      # Histogram data may not be available
      if not histogram_data:
        continue
      self._histogram_start[h['name']] = histogram_data

  def Stop(self, page, tab):
    """Prepare the results for this page.

    The results are the differences between the current histogram values
    and the values when Start() was called.
    """
    assert self._histogram_start, 'Must call Start() first'
    for h in _HISTOGRAMS:
      # Histogram data may not be available
      if h['name'] not in self._histogram_start:
        continue
      histogram_data = histogram_util.GetHistogram(
          h['type'], h['name'], tab)
      self._histogram_delta[h['name']] = histogram_util.SubtractHistogram(
          histogram_data, self._histogram_start[h['name']])

  # Optional argument trace_name is not in base class Metric.
  # pylint: disable=W0221
  def AddResults(self, tab, results, trace_name=None):
    """Add results for this page to the results object."""
    assert self._histogram_delta, 'Must call Stop() first'
    for h in _HISTOGRAMS:
      # Histogram data may not be available
      if h['name'] not in self._histogram_start:
        continue
      results.AddValue(histogram.HistogramValue(
          results.current_page, h['display_name'], h['units'],
          raw_value_json=self._histogram_delta[h['name']], important=False,
          description=h.get('description')))
    self._memory_stats = self._browser.memory_stats
    if not self._memory_stats['Browser']:
      return
    AddResultsForProcesses(results, self._memory_stats,
                           metric_trace_name=trace_name)

    if self._start_commit_charge:
      end_commit_charge = self._memory_stats['SystemCommitCharge']
      commit_charge_difference = end_commit_charge - self._start_commit_charge
      results.AddValue(scalar.ScalarValue(
          results.current_page,
          'commit_charge.' + (trace_name or 'commit_charge'),
          'kb', commit_charge_difference, important=False,
          description='System commit charge (committed memory pages).'))
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'processes.' + (trace_name or 'processes'),
        'count', self._memory_stats['ProcessCount'], important=False,
        description='Number of processes used by Chrome.'))


def AddResultsForProcesses(results, memory_stats, chart_trace_name='final',
                           metric_trace_name=None,
                           exclude_metrics=None):
  """Adds memory stats for browser, renderer and gpu processes.

  Args:
    results: A telemetry.results.PageTestResults object.
    memory_stats: Dict of memory stats keyed by process type ('Browser',
                  'Renderer', 'Gpu'), e.g. as returned by browser.memory_stats.
    chart_trace_name: Trace to identify memory metrics. Default is 'final'.
    metric_trace_name: Trace to identify the metric results per test page.
    exclude_metrics: List of memory metrics to exclude from results,
                     e.g. 'VM', 'WorkingSetSize', etc.
  """
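  # Illustrative call (hypothetical numbers; per-process stats are in bytes
  # and are converted to KB before being reported):
  #
  #   AddResultsForProcesses(results,
  #                          {'Browser': {'VM': 900 * 1024 * 1024},
  #                           'Renderer': {'VM': 400 * 1024 * 1024},
  #                           'Gpu': {'VM': 100 * 1024 * 1024}},
  #                          metric_trace_name='cold')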
  metric = 'resident_set_size'
  if sys.platform == 'win32':
    metric = 'working_set'

  exclude_metrics = exclude_metrics or {}

  def AddResultsForProcessTypes(process_types_memory, process_type_trace):
    """Add all results for a given set of process types.

    Args:
      process_types_memory: A list of process types,
                            e.g. 'Browser', 'Renderer'.
      process_type_trace: The name of this set of process types in the output.
    """
    def AddResult(value_name_memory, value_name_trace, description):
      """Add a result for a given statistic.

      Args:
        value_name_memory: Name of some statistic, e.g. 'VM', 'WorkingSetSize'.
        value_name_trace: Name of this statistic to be used in the output.
        description: Human-readable description attached to the reported value.
      """
      if value_name_memory in exclude_metrics:
        return
      if len(process_types_memory) > 1 and value_name_memory.endswith('Peak'):
        return
      values = []
      for process_type_memory in process_types_memory:
        stats = memory_stats[process_type_memory]
        if value_name_memory in stats:
          values.append(stats[value_name_memory])
      if values:
        if metric_trace_name:
          current_trace = '%s_%s' % (metric_trace_name, process_type_trace)
          chart_name = value_name_trace
        else:
          current_trace = '%s_%s' % (value_name_trace, process_type_trace)
          chart_name = current_trace
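        # The reported value name is '<chart_name>.<current_trace>'. For
        # example (illustrative), with metric_trace_name='cold' and
        # process_type_trace='renderer', the 'VM' statistic is reported as
        # 'vm_final_size.cold_renderer'; without a metric_trace_name it is
        # 'vm_final_size_renderer.vm_final_size_renderer'. Values are summed
        # in bytes and converted to KB.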
        results.AddValue(scalar.ScalarValue(
            results.current_page, '%s.%s' % (chart_name, current_trace), 'kb',
            sum(values) / 1024, important=False, description=description))

    AddResult('VM', 'vm_%s_size' % chart_trace_name,
              'Virtual Memory Size (address space allocated).')
    AddResult('WorkingSetSize', 'vm_%s_%s_size' % (metric, chart_trace_name),
              'Working Set Size (Windows) or Resident Set Size (other '
              'platforms).')
    AddResult('PrivateDirty', 'vm_private_dirty_%s' % chart_trace_name,
              'Private Dirty is basically the amount of RAM inside the '
              'process that cannot be paged to disk (it is not backed by the '
              'same data on disk), and is not shared with any other '
              'processes. Another way to look at this is the RAM that will '
              'become available to the system when that process goes away '
              '(and will probably be quickly subsumed into caches and other '
              'uses).')
    AddResult('ProportionalSetSize',
              'vm_proportional_set_size_%s' % chart_trace_name,
              'The Proportional Set Size (PSS) number is a metric the kernel '
              'computes that takes into account memory sharing -- basically '
              'each page of RAM in a process is scaled by a ratio of the '
              'number of other processes also using that page. This way you '
              'can (in theory) add up the PSS across all processes to see '
              'the total RAM they are using, and compare PSS between '
              'processes to get a rough idea of their relative weight.')
    AddResult('SharedDirty', 'vm_shared_dirty_%s' % chart_trace_name,
              'Shared Dirty is the amount of RAM outside the process that '
              'cannot be paged to disk, and is shared with other processes.')
    AddResult('VMPeak', 'vm_peak_size',
              'The peak Virtual Memory Size (address space allocated) usage '
              'achieved by the process.')
    AddResult('WorkingSetSizePeak', '%s_peak_size' % metric,
              'Peak Working Set Size.')

  AddResultsForProcessTypes(['Browser'], 'browser')
  AddResultsForProcessTypes(['Renderer'], 'renderer')
  AddResultsForProcessTypes(['Gpu'], 'gpu')
  AddResultsForProcessTypes(['Browser', 'Renderer', 'Gpu'], 'total')
    233