#!/usr/bin/python

# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Analyzes the results of a devserver load test."""

import argparse
import itertools
import json
import pprint
import re
import sys

import common
from chromite.lib import commandline
from chromite.lib import cros_logging as logging


# Default keys to skip displaying.
DEFAULT_SKIP = [
    'build_name',
    'devserver',
    'name',
    'parent',
    'quick_provision',
    'trigger_response',
]

# List of commandline arguments for easy filtering.
FILTER_ARGS = [
    'board',
    'build_name',
    'devserver',
    'name',
    'status',
]


def get_parser():
    """Creates the argparse parser."""
    parser = commandline.ArgumentParser(description=__doc__)
    parser.add_argument('infile', nargs='*', type=argparse.FileType('r'),
                        help='Path to JSON file to read.',
                        default=[sys.stdin])
    parser.add_argument('--boards', type=str, action='store',
                        help='Boards to show.')
    parser.add_argument('--group', type=str, action='store',
                        help='Comma-separated list of keys to group by.')
    parser.add_argument('--dump', action='store_true',
                        help='Dump all filtered entries.')
    parser.add_argument('--skip', type=str, action='store',
                        help='Comma-separated list of keys to skip displaying.',
                        default=','.join(DEFAULT_SKIP))
    parser.add_argument('--filter', type=str, action='store',
                        help='Filter expression to apply to each node.')
    for arg in FILTER_ARGS:
        parser.add_argument('--%s' % arg, type=str, action='store',
                            help='Comma-separated list of %s to filter by.' %
                                 arg)
    parser.add_argument('--no-summary', action='store_false', dest='summary',
                        help='Disable summary.')

    return parser


def summarize_entries(entries, skip=set()):
    """Summarize a list of entries."""
    TAG_KEYS = [
        'board', 'build_name', 'devserver', 'name',
        'parent', 'quick_provision', 'status'
    ]
    VALUE_KEYS = [
        'avg_active', 'elapsed',
    ]
    summary = {
        'COUNT': len(entries),
    }
    summary.update({key: summarize_tags(entries, key) for key in TAG_KEYS
                    if key not in skip})
    summary.update({key: summarize_values(entries, key) for key in VALUE_KEYS
                    if key not in skip})
    return summary


def summarize_tags(entries, key):
    """Summarize all the different string values for a given key."""
    tags = {str(entry[key]) for entry in entries}
    return list(tags)


def summarize_values(entries, key):
    """Summarize the numeric values for a given key."""
    if entries is None or len(entries) == 0:
        return None

    values = [entry[key] for entry in entries if key in entry]
    summary = {}
    num_values = len(values)
    if num_values:
        summary['min'] = min(values)
        summary['max'] = max(values)
        summary['avg'] = sum(values) / num_values
        num_skipped = len(entries) - num_values
        if num_skipped:
            summary['num'] = num_values
            summary['skipped'] = num_skipped
    return summary


def group_entries(keys, entries):
    """Group entries based on different values of given keys.

    @param keys: A list of keys to group by.
    @param entries: A list of entries to split into groups.

    @return A list of lists of entries, where each list has a different key
            value.
    """
    if not keys:
        return [entries]

    # Divide the group based on the first key.
    indexed = {}
    for entry in entries:
        value = str(entry[keys[0]])
        indexed.setdefault(value, []).append(entry)
    groups = [indexed[value] for value in sorted(indexed.keys())]

    # Recursively subdivide all the groups based on the rest of the keys.
    subgroups = []
    for group in groups:
        subgroups.extend(group_entries(keys[1:], group))
    return subgroups


def main(argv):
    """Analyzes the results of a devserver load test."""
    parser = get_parser()
    options = parser.parse_args(argv)

    # Read entries from the specified files (one JSON object per line).
    all_entries = []
    for f in options.infile:
        all_entries.extend([json.loads(line) for line in f])

    # Filter entries:
    # - Ignore non-provisions.
    # - Filter via the specified FILTER_ARGS arguments.
    # - Filter via explicit filter request.
    entries = filter(lambda x: x['name'] != 'Runner', all_entries)
    for arg in FILTER_ARGS:
        if options.__dict__.get(arg):
            entries = filter(lambda x: x[arg] in
                             options.__dict__[arg].split(','),
                             entries)
    if options.filter:
        entries = filter(lambda x: eval(options.filter, {'re': re}, x), entries)

    # Group the entries based on specified keys.
    groups = group_entries(options.group.split(',') if options.group else None,
                           entries)

    # Dump all filtered entries as groups, including their parents.
    if options.dump:
        dump_entries = itertools.chain(*groups)
        # Dump all entries, tracking needed parents.
        parents = []
        for entry in dump_entries:
            print(json.dumps(entry))
            if 'parent' in entry and entry['parent'] not in parents:
                parents.append(entry['parent'])
        # Dump all parents.
        for entry in all_entries:
            if entry['id'] in parents:
                print(json.dumps(entry))

    # Summarize the entries, group by group.
    if options.summary:
        skip = options.skip.split(',') if options.skip else set()
        summaries = [summarize_entries(group, skip) for group in groups]
        print(json.dumps(summaries, indent=2))


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
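
# A sketch of example usage. The script name, input file name, and field
# values below are hypothetical; the flags and JSON keys are the ones
# referenced above. Each input line is expected to be a standalone JSON
# object, e.g.:
#
#   {"id": 3, "parent": 1, "name": "Provision", "board": "eve",
#    "build_name": "eve-release/R60-9497.0.0", "devserver": "100.0.0.1",
#    "status": "pass", "quick_provision": true, "elapsed": 123.4,
#    "avg_active": 2.5}
#
# A typical run that filters by board and groups by devserver and status:
#
#   ./analyze_loadtest.py results.json --board=eve --group=devserver,status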