# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

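"""Merges multiple trace files (JSON, gzipped JSON or vulcanized HTML) into one.

See Main() below for the command-line interface. A purely illustrative
invocation (file names are hypothetical):

  merge_traces trace_A.json trace_B.json.gz -o merged_trace.html
"""
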
import argparse
import codecs
import collections
import gzip
import itertools
import json
import logging
import os
import sys

# Add tracing/ to the path.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from tracing_build import html2trace, trace2html


GZIP_FILENAME_SUFFIX = '.gz'
HTML_FILENAME_SUFFIX = '.html'


# Relevant trace event phases. See
# https://code.google.com/p/chromium/codesearch#chromium/src/base/trace_event/common/trace_event_common.h.
METADATA_PHASE = 'M'
MEMORY_DUMP_PHASE = 'v'
BEGIN_PHASE = 'B'
END_PHASE = 'E'
CLOCK_SYNC_EVENT_PHASE = 'c'


# Minimum gap between two consecutive merged traces in microseconds.
MIN_TRACE_GAP_IN_US = 1000000


# Rule for matching IDs in an IdMap. For a given level, |match| should be a
# named tuple class whose fields determine which |entry._items| are compared,
# and in what order of importance, when matching pairs of IdMap entries.
IdMapLevel = collections.namedtuple('IdMapLevel', ['name', 'match'])


class IdMap(object):
  """Abstract class for merging and mapping IDs from multiple sources."""

  # Sub-classes must provide a tuple of |IdMapLevel| objects.
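  # (See ProcessIdMap below for a concrete two-level example.)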
  LEVELS = NotImplemented

  def __init__(self, depth=0):
    assert 0 <= depth <= len(self.LEVELS)
    self._depth = depth

    if depth > 0:
      # Non-root node.
      self._canonical_id = None
      self._items = collections.defaultdict(set)
      self._sources = set()

    if depth < len(self.LEVELS):
      # Non-leaf node.
      self._entry_map = {}  # (Source, Entry ID) -> Entry.

  @property
  def max_mapped_id(self):
    """The maximum mapped ID of this map's entries."""
    if not self._entry_map:
      return 0
    return max(e._canonical_id for e in self._entry_map.itervalues())

  def AddEntry(self, source, path, **items):
    """Add a source-specific entry path to the map.

    Args:
      source: The source of the entry (e.g. trace filename).
      path: A path of source-specific entry IDs in the map (e.g. [PID, TID]).
      **items: Dictionary of items (or sets of items) to be appended to the
          target entry's |_items|.
    """
    if path:
      return self._GetSubMapEntry(source, path[0]).AddEntry(source, path[1:],
                                                            **items)
    assert 'id' not in items  # ID is set according to the path.
    for key, value in items.iteritems():
      value_set = self._items[key]
      if (isinstance(value, collections.Iterable) and
          not isinstance(value, basestring)):
        value_set.update(value)
      else:
        value_set.add(value)

  def MapEntry(self, source, path):
    """Map a source-specific entry ID path to a canonical entry ID path.

    Args:
      source: The source of the entry (e.g. trace filename).
      path: A path of source-specific entry IDs in the map (e.g. [PID, TID]).

    Returns:
      A path of canonical entry IDs in the map to which the provided path of
      source-specific entry IDs is mapped.
    """
    if not path:
      return ()
    entry = self._entry_map[(source, path[0])]
    return (entry._canonical_id,) + entry.MapEntry(source, path[1:])

  def MergeEntries(self):
    """Recursively merge the entries in this map.

    Example: Suppose that the following entries were added to the map:

      map.AddEntry(source='trace_A.json', path=[10], name='Browser')
      map.AddEntry(source='trace_A.json', path=[20], name='Renderer')
      map.AddEntry(source='trace_B.json', path=[30], name='Browser')

    Before merging, |map._entry_map| will contain three separate items:

      ('trace_A.json', 10) -> IdMap(_items={id: {10}, name: {'Browser'}},
                                    _sources={'trace_A.json'})
      ('trace_A.json', 20) -> IdMap(_items={id: {20}, name: {'Renderer'}},
                                    _sources={'trace_A.json'})
      ('trace_B.json', 30) -> IdMap(_items={id: {30}, name: {'Browser'}},
                                    _sources={'trace_B.json'})

    Since the first two entries come from the same source, they cannot be
    merged. On the other hand, the third entry could be merged with either of
    the first two. Since it has a common name with the first entry, it will be
    merged with it in this method:

      ('trace_A.json', 10) -> IdMap(_items={id: {10, 30}, name: {'Browser'}},
                                    _sources={'trace_A.json', 'trace_B.json'})
      ('trace_A.json', 20) -> IdMap(_items={id: {20}, name: {'Renderer'}},
                                    _sources={'trace_A.json'})
      ('trace_B.json', 30) -> <same IdMap as ('trace_A.json', 10)>

    Pairs of entries are merged in descending order of the sizes of the
    pair-wise intersections of their |_items| until no two entries remain
    such that (1) they have at least one value in |_items| in common and
    (2) they are mergeable (i.e. have no common source). Afterwards, unique IDs
    are assigned to the resulting "canonical" entries and their sub-entries are
    merged recursively.
    """
    if self._depth == len(self.LEVELS):
      return

    logging.debug('Merging %r entries in %s...', self.LEVELS[self._depth].name,
                  self)

    canonical_entries = self._CanonicalizeEntries()
    self._AssignIdsToCanonicalEntries(canonical_entries)

    for entry in canonical_entries:
      entry.MergeEntries()

  def _GetSubMapEntry(self, source, entry_id):
    full_id = (source, entry_id)
    entry = self._entry_map.get(full_id)
    if entry is None:
      entry = type(self)(self._depth + 1)
      entry._sources.add(source)
      entry._items['id'].add(entry_id)
      self._entry_map[full_id] = entry
    return entry

  def _CalculateUnmergeableMapFromEntrySources(self):
    entry_ids_by_source = collections.defaultdict(set)
    for entry_id, entry in self._entry_map.iteritems():
      for source in entry._sources:
        entry_ids_by_source[source].add(entry_id)

    unmergeable_map = collections.defaultdict(set)
    for unmergeable_set in entry_ids_by_source.itervalues():
      for entry_id in unmergeable_set:
        unmergeable_map[entry_id].update(unmergeable_set - {entry_id})

    return unmergeable_map

  def _IsMergeableWith(self, other):
    return self._sources.isdisjoint(other._sources)

  def _GetMatch(self, other):
    match_cls = self.LEVELS[self._depth - 1].match
    return match_cls(*(self._items[f] & other._items[f]
                       for f in match_cls._fields))

  def _MergeFrom(self, source):
    if self._depth > 0:
      # This is NOT a ROOT node, so we need to merge fields and sources from
      # the source node.
      for key, values in source._items.iteritems():
        self._items[key].update(values)
      self._sources.update(source._sources)

    if self._depth < len(self.LEVELS):
      # This is NOT a LEAF node, so we need to copy over entries from the
      # source node's entry map.
      assert not (set(self._entry_map.iterkeys()) &
                  set(source._entry_map.iterkeys()))
      self._entry_map.update(source._entry_map)

  def _CanonicalizeEntries(self):
    canonical_entries = self._entry_map.copy()

    # {ID1, ID2} -> Match between the two entries.
    matches = {frozenset([full_id1, full_id2]): entry1._GetMatch(entry2)
               for full_id1, entry1 in canonical_entries.iteritems()
               for full_id2, entry2 in canonical_entries.iteritems()
               if entry1._IsMergeableWith(entry2)}

    while matches:
      # Pop the maximum match from the dictionary.
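      # A match is considered larger if the sizes of its field-wise
      # intersections are lexicographically greater (fields listed earlier in
      # the match tuple take precedence).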
      max_match_set, max_match = max(matches.iteritems(),
                                     key=lambda (_, v): map(len, v))
      del matches[max_match_set]
      canonical_full_id, merged_full_id = max_match_set

      # Skip pairs of entries that have nothing in common.
      if not any(max_match):
        continue

      # Merge the entries and update the map to reflect this.
      canonical_entry = canonical_entries[canonical_full_id]
      merged_entry = canonical_entries.pop(merged_full_id)
      logging.debug('Merging %s into %s [match=%s]...', merged_entry,
                    canonical_entry, max_match)
      canonical_entry._MergeFrom(merged_entry)
      del merged_entry
      self._entry_map[merged_full_id] = canonical_entry

      for match_set in matches.keys():
        if merged_full_id in match_set:
          # Remove other matches with the merged entry.
          del matches[match_set]
        elif canonical_full_id in match_set:
          [other_full_id] = match_set - {canonical_full_id}
          other_entry = canonical_entries[other_full_id]
          if canonical_entry._IsMergeableWith(other_entry):
            # Update other matches with the canonical entry which are still
            # mergeable.
            matches[match_set] = canonical_entry._GetMatch(other_entry)
          else:
            # Remove other matches with the canonical entry which have become
            # unmergeable.
            del matches[match_set]

    return canonical_entries.values()

  def _AssignIdsToCanonicalEntries(self, canonical_entries):
    assigned_ids = set()
    canonical_entries_without_assigned_ids = set()

    # Try to assign each canonical entry to one of the IDs from which it was
    # merged.
    for canonical_entry in canonical_entries:
      candidate_ids = canonical_entry._items['id']
      try:
        assigned_id = next(candidate_id for candidate_id in candidate_ids
                           if candidate_id not in assigned_ids)
      except StopIteration:
        canonical_entries_without_assigned_ids.add(canonical_entry)
        continue
      assigned_ids.add(assigned_id)
      canonical_entry._canonical_id = assigned_id

    # For canonical entries where this cannot be done (highly unlikely), scan
    # from the minimal merged ID upwards for the first unassigned ID.
    for canonical_entry in canonical_entries_without_assigned_ids:
      assigned_id = next(candidate_id for candidate_id in
                         itertools.count(min(canonical_entry._items['id']))
                         if candidate_id not in assigned_ids)
      assigned_ids.add(assigned_id)
      canonical_entry._canonical_id = assigned_id

  def __repr__(self):
    cls_name = type(self).__name__
    if self._depth == 0:
      return '%s root' % cls_name
    else:
      return '%s %s entry(%s)' % (cls_name, self.LEVELS[self._depth - 1].name,
                                  self._items)


class ProcessIdMap(IdMap):
  """Class for merging and mapping PIDs and TIDs from multiple sources."""

  LEVELS = (
      IdMapLevel(name='process',
                 match=collections.namedtuple('ProcessMatch',
                                              ['name', 'id', 'label'])),
      IdMapLevel(name='thread',
                 match=collections.namedtuple('ThreadMatch', ['name', 'id']))
  )


def LoadTrace(filename):
  """Load a trace from a (possibly gzipped) file and return its parsed JSON."""
  logging.info('Loading trace %r...', filename)
  if filename.endswith(HTML_FILENAME_SUFFIX):
    return LoadHTMLTrace(filename)
  elif filename.endswith(GZIP_FILENAME_SUFFIX):
    with gzip.open(filename, 'rb') as f:
      return json.load(f)
  else:
    with open(filename, 'r') as f:
      return json.load(f)


def LoadHTMLTrace(filename):
  """Load a trace from a vulcanized HTML trace file."""
  trace_components = collections.defaultdict(list)

  for sub_trace in html2trace.ReadTracesFromHTMLFilePath(filename):
    for name, component in TraceAsDict(sub_trace).iteritems():
      trace_components[name].append(component)

  trace = {}
  for name, components in trace_components.iteritems():
    if len(components) == 1:
      trace[name] = components[0]
    elif all(isinstance(component, list) for component in components):
      trace[name] = [e for component in components for e in component]
    else:
      trace[name] = components[0]
      logging.warning(
          'Values of repeated trace component %r in HTML trace %r are not '
          'lists. The first defined value of the component will be used.',
          name, filename)

  return trace


def SaveTrace(trace, filename):
  """Save a JSON trace to a (possibly gzipped) file."""
  if filename is None:
    logging.info('Dumping trace to standard output...')
    print json.dumps(trace)
  else:
    logging.info('Saving trace %r...', filename)
    if filename.endswith(HTML_FILENAME_SUFFIX):
      with codecs.open(filename, mode='w', encoding='utf-8') as f:
        trace2html.WriteHTMLForTraceDataToFile([trace], 'Merged trace', f)
    elif filename.endswith(GZIP_FILENAME_SUFFIX):
      with gzip.open(filename, 'wb') as f:
        json.dump(trace, f)
    else:
      with open(filename, 'w') as f:
        json.dump(trace, f)


def TraceAsDict(trace):
  """Ensure that a trace is a dictionary."""
  if isinstance(trace, list):
    return {'traceEvents': trace}
  return trace


def MergeTraceFiles(input_trace_filenames, output_trace_filename):
  """Merge a collection of input trace files into an output trace file."""
  logging.info('Loading %d input traces...', len(input_trace_filenames))
  input_traces = collections.OrderedDict()
  for input_trace_filename in input_trace_filenames:
    input_traces[input_trace_filename] = LoadTrace(input_trace_filename)

  logging.info('Merging traces...')
  output_trace = MergeTraces(input_traces)

  logging.info('Saving output trace...')
  SaveTrace(output_trace, output_trace_filename)

  logging.info('Finished.')


def MergeTraces(traces):
  """Merge a collection of JSON traces into a single JSON trace."""
  trace_components = collections.defaultdict(collections.OrderedDict)

  for filename, trace in traces.iteritems():
    for name, component in TraceAsDict(trace).iteritems():
      trace_components[name][filename] = component

  merged_trace = {}
  for component_name, components_by_filename in trace_components.iteritems():
    logging.info('Merging %d %r components...', len(components_by_filename),
                 component_name)
    merged_trace[component_name] = MergeComponents(component_name,
                                                   components_by_filename)

  return merged_trace


def MergeComponents(component_name, components_by_filename):
  """Merge a component of multiple JSON traces into a single component."""
  if component_name == 'traceEvents':
    return MergeTraceEvents(components_by_filename)
  else:
    return MergeGenericTraceComponents(component_name, components_by_filename)


def MergeTraceEvents(events_by_filename):
  """Merge trace events from multiple traces into a single list of events."""
  # Remove strings from the list of trace events
  # (https://github.com/catapult-project/catapult/issues/2497).
  events_by_filename = collections.OrderedDict(
      (filename, [e for e in events if not isinstance(e, basestring)])
      for filename, events in events_by_filename.iteritems())

  timestamp_range_by_filename = _AdjustTimestampRanges(events_by_filename)
  process_map = _CreateProcessMapFromTraceEvents(events_by_filename)
  merged_events = _CombineTraceEvents(events_by_filename, process_map)
  _RemoveSurplusClockSyncEvents(merged_events)
  merged_events.extend(
      _BuildInjectedTraceMarkerEvents(timestamp_range_by_filename, process_map))
  return merged_events


def _RemoveSurplusClockSyncEvents(events):
  """Remove all clock sync events except for the first one."""
  # TODO(petrcermak): Figure out how to handle merging multiple clock sync
  # events.
  clock_sync_event_indices = [i for i, e in enumerate(events)
                              if e['ph'] == CLOCK_SYNC_EVENT_PHASE]
  # The indices need to be traversed from largest to smallest (hence the -1).
  for i in clock_sync_event_indices[:0:-1]:
    del events[i]


def _AdjustTimestampRanges(events_by_filename):
  logging.info('Adjusting timestamp ranges of traces...')

  previous_trace_max_timestamp = 0
  timestamp_range_by_filename = collections.OrderedDict()

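  # Lay the traces out one after another on a shared timeline: each trace is
  # shifted (if necessary) so that it starts at least MIN_TRACE_GAP_IN_US
  # after the previous trace ends.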
  for index, (filename, events) in enumerate(events_by_filename.iteritems(), 1):
    # Skip metadata events, the timestamps of which are always zero.
    non_metadata_events = [e for e in events if e['ph'] != METADATA_PHASE]
    if not non_metadata_events:
      logging.warning('Trace %r (%d/%d) only contains metadata events.',
                      filename, index, len(events_by_filename))
      timestamp_range_by_filename[filename] = None
      continue

    min_timestamp = min(e['ts'] for e in non_metadata_events)
    max_timestamp = max(e['ts'] for e in non_metadata_events)

    # Determine by how much the timestamps should be shifted.
    injected_timestamp_shift = max(
        previous_trace_max_timestamp + MIN_TRACE_GAP_IN_US - min_timestamp, 0)
    logging.info('Injected timestamp shift in trace %r (%d/%d): %d us '
                 '[min=%d, max=%d, duration=%d].', filename, index,
                 len(events_by_filename), injected_timestamp_shift,
                 min_timestamp, max_timestamp, max_timestamp - min_timestamp)

    if injected_timestamp_shift > 0:
      # Shift the timestamps.
      for event in non_metadata_events:
        event['ts'] += injected_timestamp_shift

      # Adjust the range.
      min_timestamp += injected_timestamp_shift
      max_timestamp += injected_timestamp_shift

    previous_trace_max_timestamp = max_timestamp

    timestamp_range_by_filename[filename] = min_timestamp, max_timestamp

  return timestamp_range_by_filename


def _CreateProcessMapFromTraceEvents(events_by_filename):
  logging.info('Creating process map from trace events...')

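  # Process and thread names (and process labels) from metadata events,
  # together with the raw PIDs and TIDs, are the match keys that ProcessIdMap
  # uses to merge processes and threads across the input traces.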
  process_map = ProcessIdMap()
  for filename, events in events_by_filename.iteritems():
    for event in events:
      pid, tid = event['pid'], event['tid']
      process_map.AddEntry(source=filename, path=(pid, tid))
      if event['ph'] == METADATA_PHASE:
        if event['name'] == 'process_name':
          process_map.AddEntry(source=filename, path=(pid,),
                               name=event['args']['name'])
        elif event['name'] == 'process_labels':
          process_map.AddEntry(source=filename, path=(pid,),
                               label=event['args']['labels'].split(','))
        elif event['name'] == 'thread_name':
          process_map.AddEntry(source=filename, path=(pid, tid),
                               name=event['args']['name'])

  process_map.MergeEntries()
  return process_map


def _CombineTraceEvents(events_by_filename, process_map):
  logging.info('Combining trace events from all traces...')

  type_name_event_by_pid = {}
  combined_events = []

  for index, (filename, events) in enumerate(events_by_filename.iteritems(), 1):
    for event in events:
      if _UpdateTraceEventForMerge(event, process_map, filename, index,
                                   type_name_event_by_pid):
        combined_events.append(event)

  return combined_events


def _UpdateTraceEventForMerge(event, process_map, filename, index,
                              type_name_event_by_pid):
  pid, tid = process_map.MapEntry(source=filename,
                                  path=(event['pid'], event['tid']))
  event['pid'], event['tid'] = pid, tid

  if event['ph'] == METADATA_PHASE:
    # Update IDs in 'stackFrames' and 'typeNames' metadata events.
    if event['name'] == 'stackFrames':
      _UpdateDictIds(index, event['args'], 'stackFrames')
      for frame in event['args']['stackFrames'].itervalues():
        _UpdateFieldId(index, frame, 'parent')
    elif event['name'] == 'typeNames':
      _UpdateDictIds(index, event['args'], 'typeNames')
      existing_type_name_event = type_name_event_by_pid.get(pid)
      if existing_type_name_event is None:
        type_name_event_by_pid[pid] = event
      else:
        existing_type_name_event['args']['typeNames'].update(
            event['args']['typeNames'])
        # Don't add the event to the merged trace because it has been merged
        # into an existing 'typeNames' metadata event for the given process.
        return False

  elif event['ph'] == MEMORY_DUMP_PHASE:
    # Update stack frame and type name IDs in heap dump entries in process
    # memory dumps.
    for heap_dump in event['args']['dumps'].get('heaps', {}).itervalues():
      for heap_entry in heap_dump['entries']:
        _UpdateFieldId(index, heap_entry, 'bt', ignored_values=[''])
        _UpdateFieldId(index, heap_entry, 'type')

  return True  # Events should be added to the merged trace by default.


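# Stack frame and type name IDs are namespaced with the index of the trace
# they came from so that IDs from different traces cannot collide (e.g. ID 42
# from the second input trace becomes '2#42').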
def _ConvertId(index, original_id):
  return '%d#%s' % (index, original_id)


def _UpdateDictIds(index, parent_dict, key):
  parent_dict[key] = {
      _ConvertId(index, original_id): value
      for original_id, value in parent_dict[key].iteritems()}


def _UpdateFieldId(index, parent_dict, key, ignored_values=()):
  original_value = parent_dict.get(key)
  if original_value is not None and original_value not in ignored_values:
    parent_dict[key] = _ConvertId(index, original_value)


def _BuildInjectedTraceMarkerEvents(timestamp_range_by_filename, process_map):
  logging.info('Building injected trace marker events...')

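  # Use a PID greater than any mapped PID so that the injected process cannot
  # clash with a real one.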
  injected_pid = process_map.max_mapped_id + 1

  # Inject a mock process with a thread.
  injected_events = [
      {
          'pid': injected_pid,
          'tid': 0,
          'ph': METADATA_PHASE,
          'ts': 0,
          'name': 'process_sort_index',
          'args': {'sort_index': -1000}  # Show the process at the top.
      },
      {
          'pid': injected_pid,
          'tid': 0,
          'ph': METADATA_PHASE,
          'ts': 0,
          'name': 'process_name',
          'args': {'name': 'Merged traces'}
      },
      {
          'pid': injected_pid,
          'tid': 0,
          'ph': METADATA_PHASE,
          'ts': 0,
          'name': 'thread_name',
          'args': {'name': 'Trace'}
      }
  ]

  # Inject slices for each sub-trace denoting its beginning and end.
  for index, (filename, timestamp_range) in enumerate(
      timestamp_range_by_filename.iteritems(), 1):
    if timestamp_range is None:
      continue
    min_timestamp, max_timestamp = timestamp_range
    name = 'Trace %r (%d/%d)' % (filename, index,
                                 len(timestamp_range_by_filename))
    slice_id = 'INJECTED_TRACE_MARKER_%d' % index
    injected_events.extend([
        {
            'pid': injected_pid,
            'tid': 0,
            'ph': BEGIN_PHASE,
            'cat': 'injected',
            'name': name,
            'id': slice_id,
            'ts': min_timestamp
        },
        {
            'pid': injected_pid,
            'tid': 0,
            'ph': END_PHASE,
            'cat': 'injected',
            'name': name,
            'id': slice_id,
            'ts': max_timestamp
        }
    ])

  return injected_events


def MergeGenericTraceComponents(component_name, components_by_filename):
  """Merge a generic component of multiple JSON traces into a single component.

  This function is only used for components that don't have a component-specific
  merging function (see MergeTraceEvents). It simply returns the component's
  value from the first trace that provides it.
  """
  components = components_by_filename.itervalues()
  first_component = next(components)
  if not all(c == first_component for c in components):
    logging.warning(
        'Values of trace component %r differ across the provided traces. '
        'The first defined value of the component will be used.',
        component_name)
  return first_component


def Main(argv):
  parser = argparse.ArgumentParser(description='Merge multiple traces.',
                                   add_help=False)
  parser.add_argument('input_traces', metavar='INPUT_TRACE', nargs='+',
                      help='Input trace filename.')
  parser.add_argument('-h', '--help', action='help',
                      help='Show this help message and exit.')
  parser.add_argument('-o', '--output_trace', help='Output trace filename. If '
                      'not provided, the merged trace will be written to '
                      'the standard output.')
  parser.add_argument('-v', '--verbose', action='count', dest='verbosity',
                      help='Increase verbosity level.')
  args = parser.parse_args(argv[1:])

  # Set verbosity level.
  if args.verbosity >= 2:
    logging_level = logging.DEBUG
  elif args.verbosity == 1:
    logging_level = logging.INFO
  else:
    logging_level = logging.WARNING
  logging.getLogger().setLevel(logging_level)

  try:
    MergeTraceFiles(args.input_traces, args.output_trace)
    return 0
  except Exception:  # pylint: disable=broad-except
    logging.exception('Something went wrong:')
    return 1
  finally:
    logging.warning('This is an EXPERIMENTAL TOOL! If you encounter any '
                    'issues, please file a Catapult bug '
                    '(https://github.com/catapult-project/catapult/issues/new) '
                    'with your current Catapult commit hash, a description of '
                    'the problem and any error messages, attach the input '
                    'traces and notify petrcermak@chromium.org. Thank you!')
    677