# Copyright 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The class to show the experiment status banner."""

from __future__ import print_function

import collections
import datetime
import time


class ExperimentStatus(object):
  """Tracks completion progress and status of an experiment's benchmark runs."""

  def __init__(self, experiment):
    self.experiment = experiment
    self.num_total = len(self.experiment.benchmark_runs)
    self.completed = 0
    # Wall-clock time at which the most recently started job began; used
    # below to estimate how long the current job has been running.
    self.new_job_start_time = time.time()
    self.log_level = experiment.log_level

  def _GetProgressBar(self, num_complete, num_total):
    """Return a text progress bar, e.g. "Done: 50% [>>>>>    ]"."""
    ret = 'Done: %s%%' % int(100.0 * num_complete / num_total)
    bar_length = 50
    done_char = '>'
    undone_char = ' '
    # Integer division: the bar is drawn in whole characters.
    num_complete_chars = bar_length * num_complete // num_total
    num_undone_chars = bar_length - num_complete_chars
    ret += ' [%s%s]' % (num_complete_chars * done_char,
                        num_undone_chars * undone_char)
    return ret
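
  # Illustrative rendering (assumed inputs): _GetProgressBar(25, 50) returns
  #   'Done: 50% [>>>>>>>>>>>>>>>>>>>>>>>>>                         ]'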

  def GetProgressString(self):
    """Return elapsed time, ETA, and a progress bar for the experiment."""
    current_time = time.time()
    if self.experiment.start_time:
      elapsed_time = current_time - self.experiment.start_time
    else:
      elapsed_time = 0
    try:
      if self.completed != self.experiment.num_complete:
        self.completed = self.experiment.num_complete
        self.new_job_start_time = current_time
      time_completed_jobs = (elapsed_time -
                             (current_time - self.new_job_start_time))
      # ETA is calculated as:
      #   ETA = (num_jobs_not_yet_started * estimated_time_per_job)
      #          + time_left_for_current_job
      #
      #   where
      #        num_jobs_not_yet_started = (num_total - num_complete - 1)
      #
      #        estimated_time_per_job = time_completed_jobs / num_run_complete
      #
      #        time_left_for_current_job = estimated_time_per_job -
      #                                    time_spent_so_far_on_current_job
      #
      #  The biggest problem with this calculation is its assumption that
      #  all jobs have roughly the same running time (blatantly false!).
      #
      #  ETA can come out negative if the time spent on the current job is
      #  greater than the estimated time per job (e.g. you're running the
      #  first long job, after a series of short jobs).  For now, if that
      #  happens, we set the ETA to "Unknown."
      #
      eta_seconds = (float(self.num_total - self.experiment.num_complete - 1) *
                     time_completed_jobs / self.experiment.num_run_complete +
                     (time_completed_jobs / self.experiment.num_run_complete -
                      (current_time - self.new_job_start_time)))
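
      # Worked example (illustrative numbers only): with num_total=10,
      # num_complete=4, num_run_complete=4, elapsed_time=430s, and 30s spent
      # so far on the current job:
      #   time_completed_jobs    = 430 - 30 = 400s
      #   estimated_time_per_job = 400 / 4  = 100s
      #   eta_seconds            = (10 - 4 - 1) * 100 + (100 - 30) = 570s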

      eta_seconds = int(eta_seconds)
      if eta_seconds > 0:
        eta = datetime.timedelta(seconds=eta_seconds)
      else:
        eta = 'Unknown'
    except ZeroDivisionError:
      eta = 'Unknown'
    strings = []
    strings.append('Current time: %s Elapsed: %s ETA: %s' %
                   (datetime.datetime.now(),
                    datetime.timedelta(seconds=int(elapsed_time)), eta))
    strings.append(self._GetProgressBar(self.experiment.num_complete,
                                        self.num_total))
    return '\n'.join(strings)

  def GetStatusString(self):
    """Get the status string of all the benchmark_runs."""
    status_bins = collections.defaultdict(list)
    for benchmark_run in self.experiment.benchmark_runs:
      status_bins[benchmark_run.timeline.GetLastEvent()].append(benchmark_run)

    status_strings = []
    for key, val in status_bins.iteritems():
      if key == 'RUNNING':
        get_description = self._GetNamesAndIterations
      else:
        get_description = self._GetCompactNamesAndIterations
      status_strings.append('%s: %s' % (key, get_description(val)))

    thread_status = ''
    thread_status_format = 'Thread Status: \n{}\n'
    if (self.experiment.schedv2() is None and
        self.experiment.log_level == 'verbose'):
      # Add the machine manager status.
      thread_status = thread_status_format.format(
          self.experiment.machine_manager.AsString())
    elif self.experiment.schedv2():
      # In schedv2 mode, we always print out thread status.
      thread_status = thread_status_format.format(
          self.experiment.schedv2().threads_status_as_string())

    result = '{}{}'.format(thread_status, '\n'.join(status_strings))

    return result
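
  # Illustrative output (hypothetical event and run names):
  #   RUNNING:  2 ('octane_run_1' 0:02:10, 'kraken_run_1' 0:00:45)
  #   SUCCEEDED:  4
  #     image1: octane [1,2], kraken [1,2]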

  def _GetNamesAndIterations(self, benchmark_runs):
    """List each run's name with the time elapsed since its last event."""
    strings = []
    t = time.time()
    for benchmark_run in benchmark_runs:
      t_last = benchmark_run.timeline.GetLastEventTime()
      elapsed = str(datetime.timedelta(seconds=int(t - t_last)))
      strings.append("'{0}' {1}".format(benchmark_run.name, elapsed))
    return ' %s (%s)' % (len(strings), ', '.join(strings))

  def _GetCompactNamesAndIterations(self, benchmark_runs):
    """Group runs by label, listing each benchmark's iterations per label."""
    grouped_benchmarks = collections.defaultdict(list)
    for benchmark_run in benchmark_runs:
      grouped_benchmarks[benchmark_run.label.name].append(benchmark_run)

    output_segs = []
    for label_name, label_runs in grouped_benchmarks.iteritems():
      strings = []
      benchmark_iterations = collections.defaultdict(list)
      for benchmark_run in label_runs:
        assert benchmark_run.label.name == label_name
        benchmark_name = benchmark_run.benchmark.name
        benchmark_iterations[benchmark_name].append(benchmark_run.iteration)
      for key, val in benchmark_iterations.iteritems():
        val.sort()
        iterations = ','.join(map(str, val))
        strings.append('{} [{}]'.format(key, iterations))
      output_segs.append('  ' + label_name + ': ' + ', '.join(strings) + '\n')

    return ' %s \n%s' % (len(benchmark_runs), ''.join(output_segs))
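
# Usage sketch (illustrative; assumes an `experiment` object exposing the
# attributes referenced above, e.g. benchmark_runs, num_complete,
# num_run_complete, start_time, log_level, schedv2):
#
#   status = ExperimentStatus(experiment)
#   while experiment.num_complete < len(experiment.benchmark_runs):
#     print(status.GetProgressString())
#     print(status.GetStatusString())
#     time.sleep(30)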