# Home | History | Annotate | Download | only in subcommands
      1 # Copyright 2013 The Chromium Authors. All rights reserved.
      2 # Use of this source code is governed by a BSD-style license that can be
      3 # found in the LICENSE file.
      4 
      5 import logging
      6 import sys
      7 
      8 from lib.bucket import BUCKET_ID, COMMITTED, ALLOC_COUNT, FREE_COUNT
      9 from lib.policy import PolicySet
     10 from lib.subcommand import SubCommand
     11 
     12 
     13 LOGGER = logging.getLogger('dmprof')
     14 
     15 
     16 class ExpandCommand(SubCommand):
     17   def __init__(self):
     18     super(ExpandCommand, self).__init__(
     19         'Usage: %prog expand <dump> <policy> <component> <depth>')
     20     self._parser.add_option('--alternative-dirs', dest='alternative_dirs',
     21                             metavar='/path/on/target@/path/on/host[:...]',
     22                             help='Read files in /path/on/host/ instead of '
     23                                  'files in /path/on/target/.')
     24 
     25   def do(self, sys_argv):
     26     options, args = self._parse_args(sys_argv, 4)
     27     dump_path = args[1]
     28     target_policy = args[2]
     29     component_name = args[3]
     30     depth = args[4]
     31     alternative_dirs_dict = {}
     32 
     33     policy_set = PolicySet.load(SubCommand._parse_policy_list(target_policy))
     34     if not policy_set[target_policy].find_rule(component_name):
     35       sys.stderr.write("ERROR: Component %s not found in policy %s\n"
     36           % (component_name, target_policy))
     37       return 1
     38 
     39     if options.alternative_dirs:
     40       for alternative_dir_pair in options.alternative_dirs.split(':'):
     41         target_path, host_path = alternative_dir_pair.split('@', 1)
     42         alternative_dirs_dict[target_path] = host_path
     43     (bucket_set, dump) = SubCommand.load_basic_files(
     44         dump_path, False, alternative_dirs=alternative_dirs_dict)
     45 
     46     ExpandCommand._output(dump, policy_set[target_policy], bucket_set,
     47                           component_name, int(depth), sys.stdout)
     48     return 0
     49 
     50   @staticmethod
     51   def _output(dump, policy, bucket_set, component_name, depth, out):
     52     """Prints all stacktraces in a given component of given depth.
     53 
     54     Args:
     55         dump: A Dump object.
     56         policy: A Policy object.
     57         bucket_set: A BucketSet object.
     58         component_name: A name of component for filtering.
     59         depth: An integer representing depth to be printed.
     60         out: An IO object to output.
     61     """
     62     sizes = {}
     63 
     64     ExpandCommand._accumulate(
     65         dump, policy, bucket_set, component_name, depth, sizes)
     66 
     67     sorted_sizes_list = sorted(
     68         sizes.iteritems(), key=(lambda x: x[1]), reverse=True)
     69     total = 0
     70     # TODO(dmikurube): Better formatting.
     71     for size_pair in sorted_sizes_list:
     72       out.write('%10d %s\n' % (size_pair[1], size_pair[0]))
     73       total += size_pair[1]
     74     LOGGER.info('total: %d\n' % total)
     75 
     76   @staticmethod
     77   def _add_size(precedence, bucket, depth, committed, sizes):
     78     stacktrace_sequence = precedence
     79     for function, sourcefile in zip(
     80         bucket.symbolized_stackfunction[
     81             0 : min(len(bucket.symbolized_stackfunction), 1 + depth)],
     82         bucket.symbolized_stacksourcefile[
     83             0 : min(len(bucket.symbolized_stacksourcefile), 1 + depth)]):
     84       stacktrace_sequence += '%s(@%s) ' % (function, sourcefile)
     85     if not stacktrace_sequence in sizes:
     86       sizes[stacktrace_sequence] = 0
     87     sizes[stacktrace_sequence] += committed
     88 
     89   @staticmethod
     90   def _accumulate(dump, policy, bucket_set, component_name, depth, sizes):
     91     rule = policy.find_rule(component_name)
     92     if not rule:
     93       pass
     94     elif rule.allocator_type == 'malloc':
     95       for line in dump.iter_stacktrace:
     96         words = line.split()
     97         bucket = bucket_set.get(int(words[BUCKET_ID]))
     98         if not bucket or bucket.allocator_type == 'malloc':
     99           component_match = policy.find_malloc(bucket)
    100         elif bucket.allocator_type == 'mmap':
    101           continue
    102         else:
    103           assert False
    104         if component_match == component_name:
    105           precedence = ''
    106           precedence += '(alloc=%d) ' % int(words[ALLOC_COUNT])
    107           precedence += '(free=%d) ' % int(words[FREE_COUNT])
    108           if bucket.typeinfo:
    109             precedence += '(type=%s) ' % bucket.symbolized_typeinfo
    110             precedence += '(type.name=%s) ' % bucket.typeinfo_name
    111           ExpandCommand._add_size(precedence, bucket, depth,
    112                                   int(words[COMMITTED]), sizes)
    113     elif rule.allocator_type == 'mmap':
    114       for _, region in dump.iter_map:
    115         if region[0] != 'hooked':
    116           continue
    117         component_match, bucket = policy.find_mmap(region, bucket_set)
    118         if component_match == component_name:
    119           ExpandCommand._add_size('', bucket, depth,
    120                                   region[1]['committed'], sizes)
    121