# Shell class for a test, inherited by all individual tests
#
# Methods:
#       __init__        initialise
#       initialize      run once for each job
#       setup           run once for each new version of the test installed
#       run             run the test (wrapped by job.run_test())
#
# Data:
#       job             backreference to the job this test instance is part of
#       outputdir       eg. results/<job>/<testname.tag>
#       resultsdir      eg. results/<job>/<testname.tag>/results
#       profdir         eg. results/<job>/<testname.tag>/profiling
#       debugdir        eg. results/<job>/<testname.tag>/debug
#       bindir          eg. tests/<test>
#       src             eg. tests/<test>/src
#       tmpdir          eg. tmp/<tempname>_<testname.tag>
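#
# A minimal, illustrative sketch of how a test is typically layered on top of
# this class (the test name "platform_Example" and its sleep_secs parameter
# are hypothetical, not part of this module):
#
#       from autotest_lib.client.bin import test, utils
#
#       class platform_Example(test.test):
#           version = 1
#
#           def run_once(self, sleep_secs=1):
#               utils.system('sleep %d' % sleep_secs)
#               self.output_perf_value('sleep_time', sleep_secs, units='sec',
#                                      higher_is_better=False)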

#pylint: disable=C0111

import fcntl
import json
import logging
import os
import re
import shutil
import stat
import sys
import tempfile
import time
import traceback

from autotest_lib.client.bin import utils
from autotest_lib.client.common_lib import error
from autotest_lib.client.common_lib import utils as client_utils

try:
    from chromite.lib import metrics
except ImportError:
    metrics = client_utils.metrics_mock


class base_test(object):
    preserve_srcdir = False

    def __init__(self, job, bindir, outputdir):
        self.job = job
        self.pkgmgr = job.pkgmgr
        self.autodir = job.autodir
        self.outputdir = outputdir
        self.tagged_testname = os.path.basename(self.outputdir)
        self.resultsdir = os.path.join(self.outputdir, 'results')
        os.mkdir(self.resultsdir)
        self.profdir = os.path.join(self.outputdir, 'profiling')
        os.mkdir(self.profdir)
        self.debugdir = os.path.join(self.outputdir, 'debug')
        os.mkdir(self.debugdir)
        # TODO(ericli): figure out how the autotest crash handler works with
        # the cros crash handler; we should restore it in the near term.
        # Once this is re-enabled, import getpass. crosbug.com/31232
        # if getpass.getuser() == 'root':
        #     self.configure_crash_handler()
        # else:
        self.crash_handling_enabled = False
        self.bindir = bindir
        self.srcdir = os.path.join(self.bindir, 'src')
        self.tmpdir = tempfile.mkdtemp("_" + self.tagged_testname,
                                       dir=job.tmpdir)
        self._keyvals = []
        self._new_keyval = False
        self.failed_constraints = []
        self.iteration = 0
        self.before_iteration_hooks = []
        self.after_iteration_hooks = []

        # Flag to indicate if the test has succeeded or failed.
        self.success = False


    def configure_crash_handler(self):
        pass


    def crash_handler_report(self):
        pass


    def assert_(self, expr, msg='Assertion failed.'):
        if not expr:
            raise error.TestError(msg)


    def write_test_keyval(self, attr_dict):
        utils.write_keyval(self.outputdir, attr_dict)


    @staticmethod
    def _append_type_to_keys(dictionary, typename):
        new_dict = {}
        for key, value in dictionary.iteritems():
            new_key = "%s{%s}" % (key, typename)
            new_dict[new_key] = value
        return new_dict


    def output_perf_value(self, description, value, units=None,
                          higher_is_better=None, graph=None,
                          replacement='_', replace_existing_values=False):
        """
        Records a measured performance value in an output file.

        The output file will subsequently be parsed by the TKO parser to have
        the information inserted into the results database.

        @param description: A string describing the measured perf value. Must
                be maximum length 256, and may only contain letters, numbers,
                periods, dashes, and underscores.  For example:
                "page_load_time", "scrolling-frame-rate".
        @param value: A number representing the measured perf value, or a list
                of measured values if a test takes multiple measurements.
                Measured perf values can be either ints or floats.
        @param units: A string describing the units associated with the
                measured perf value. Must be maximum length 32, and may only
                contain letters, numbers, periods, dashes, and underscores.
                For example: "msec", "fps", "score", "runs_per_second".
        @param higher_is_better: A boolean indicating whether or not a "higher"
                measured perf value is considered to be better. If False, it is
                assumed that a "lower" measured value is considered to be
                better. This impacts dashboard plotting and email notification.
                Pure autotests are expected to specify either True or False!
                This value can be set to "None" to indicate that the perf
                dashboard should apply the rules encoded via Chromium
                unit-info.json. This is only used for tracking Chromium based
                tests (in particular telemetry).
        @param graph: A string indicating the name of the graph on which
                the perf value will subsequently be displayed on the chrome
                perf dashboard. This allows multiple metrics to be grouped
                together on the same graph. Defaults to None, indicating that
                the perf value should be displayed individually on a separate
                graph.
        @param replacement: String to replace illegal characters in
                |description| and |units| with.
        @param replace_existing_values: A boolean indicating whether or not a
                newly added perf value should replace existing perf values.
        """
        if len(description) > 256:
            raise ValueError('The description must be at most 256 characters.')
        if units and len(units) > 32:
            raise ValueError('The units must be at most 32 characters.')

        # If |replacement| is legal, replace illegal characters with it.
        string_regex = re.compile(r'[^-\.\w]')
        if replacement is None or re.search(string_regex, replacement):
            raise ValueError('Invalid replacement string to mask illegal '
                             'characters. May only contain letters, numbers, '
                             'periods, dashes, and underscores. '
                             'replacement: %s' % replacement)
        description = re.sub(string_regex, replacement, description)
        units = re.sub(string_regex, replacement, units) if units else None

        charts = {}
        output_file = os.path.join(self.resultsdir, 'results-chart.json')
        if os.path.isfile(output_file):
            with open(output_file, 'r') as fp:
                contents = fp.read()
                if contents:
                    charts = json.loads(contents)

        if graph:
            first_level = graph
            second_level = description
        else:
            first_level = description
            second_level = 'summary'

        direction = 'up' if higher_is_better else 'down'

        # All input should be numbers, but at times strings representing
        # numbers are logged; attempt to convert them to numbers. If a
        # non-numeric string is logged, an exception will be thrown.
        if isinstance(value, list):
            value = map(float, value)
        else:
            value = float(value)

        result_type = 'scalar'
        value_key = 'value'
        result_value = value

        # The chart json spec go/telemetry-json differentiates between a
        # single value and a list of values. Lists of values get extra
        # processing in the chromeperf dashboard (mean, standard deviation,
        # etc.). Tests can log one or more values for the same metric; to
        # adhere strictly to the specification, the first value logged is a
        # scalar, but if another value is logged the results become a list of
        # scalars.
        # TODO: Figure out if there would be any difference in always using a
        # list of scalars, even if there is just one item in the list.
        if isinstance(value, list):
            result_type = 'list_of_scalar_values'
            value_key = 'values'
            if first_level in charts and second_level in charts[first_level]:
                if 'values' in charts[first_level][second_level]:
                    result_value = charts[first_level][second_level]['values']
                elif 'value' in charts[first_level][second_level]:
                    result_value = [charts[first_level][second_level]['value']]
                if replace_existing_values:
                    result_value = value
                else:
                    result_value.extend(value)
            else:
                result_value = value
        elif (first_level in charts and second_level in charts[first_level] and
              not replace_existing_values):
            result_type = 'list_of_scalar_values'
            value_key = 'values'
            if 'values' in charts[first_level][second_level]:
                result_value = charts[first_level][second_level]['values']
                result_value.append(value)
            else:
                result_value = [charts[first_level][second_level]['value'],
                                value]

        test_data = {
            second_level: {
                'type': result_type,
                'units': units,
                value_key: result_value,
                'improvement_direction': direction
            }
        }

        if first_level in charts:
            charts[first_level].update(test_data)
        else:
            charts.update({first_level: test_data})

        with open(output_file, 'w') as fp:
            fp.write(json.dumps(charts, indent=2))
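
    # Illustrative example (the metric name and numbers are made up): a test
    # might record a couple of frame-rate samples with
    #
    #   self.output_perf_value('scrolling-frame-rate', [58.9, 60.0],
    #                          units='fps', higher_is_better=True)
    #
    # which leaves a results-chart.json of roughly this shape:
    #
    #   {
    #     "scrolling-frame-rate": {
    #       "summary": {
    #         "type": "list_of_scalar_values",
    #         "units": "fps",
    #         "values": [58.9, 60.0],
    #         "improvement_direction": "up"
    #       }
    #     }
    #   }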


    def write_perf_keyval(self, perf_dict):
        self.write_iteration_keyval({}, perf_dict)


    def write_attr_keyval(self, attr_dict):
        self.write_iteration_keyval(attr_dict, {})


    def write_iteration_keyval(self, attr_dict, perf_dict):
        # append the dictionaries before they have the {perf} and {attr} added
        self._keyvals.append({'attr':attr_dict, 'perf':perf_dict})
        self._new_keyval = True

        if attr_dict:
            attr_dict = self._append_type_to_keys(attr_dict, "attr")
            utils.write_keyval(self.resultsdir, attr_dict, type_tag="attr")

        if perf_dict:
            perf_dict = self._append_type_to_keys(perf_dict, "perf")
            utils.write_keyval(self.resultsdir, perf_dict, type_tag="perf")

        keyval_path = os.path.join(self.resultsdir, "keyval")
        with open(keyval_path, "a") as keyval_file:
            keyval_file.write("\n")
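
    # For example (hypothetical keyval names and numbers), an iteration might
    # record:
    #
    #   self.write_perf_keyval({'throughput_mb_per_sec': 123.4})
    #   self.write_attr_keyval({'kernel_version': '4.4'})
    #
    # Both land in the results keyval file with {perf} or {attr} appended to
    # the key names, which is how the TKO parser distinguishes the two kinds.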


    def analyze_perf_constraints(self, constraints):
        if not self._new_keyval:
            return

        # create a dict from the keyvals suitable as an environment for eval
        keyval_env = self._keyvals[-1]['perf'].copy()
        keyval_env['__builtins__'] = None
        self._new_keyval = False
        failures = []

        # evaluate each constraint using the current keyvals
        for constraint in constraints:
            logging.info('___________________ constraint = %s', constraint)
            logging.info('___________________ keyvals = %s', keyval_env)

            try:
                if not eval(constraint, keyval_env):
                    failures.append('%s: constraint was not met' % constraint)
            except:
                failures.append('could not evaluate constraint: %s'
                                % constraint)

        # keep track of the errors for each iteration
        self.failed_constraints.append(failures)


    def process_failed_constraints(self):
        msg = ''
        for i, failures in enumerate(self.failed_constraints):
            if failures:
                msg += 'iteration %d:%s  ' % (i, ','.join(failures))

        if msg:
            raise error.TestFail(msg)
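
    # Constraints are plain Python expressions evaluated against the perf
    # keyvals of the latest iteration, e.g. (hypothetical keyval name):
    #
    #   constraints = ['throughput_mb_per_sec > 100']
    #
    # A constraint that is false, or that fails to evaluate, is recorded by
    # analyze_perf_constraints() and later raised as a TestFail by
    # process_failed_constraints().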


    def register_before_iteration_hook(self, iteration_hook):
        """
        This is how we expect test writers to register a before_iteration_hook.
        This adds the method to the list of hooks which are executed
        before each iteration.

        @param iteration_hook: Method to run before each iteration. A valid
                               hook accepts a single argument which is the
                               test object.
        """
        self.before_iteration_hooks.append(iteration_hook)


    def register_after_iteration_hook(self, iteration_hook):
        """
        This is how we expect test writers to register an after_iteration_hook.
        This adds the method to the list of hooks which are executed
        after each iteration. Hooks are executed in stack fashion, starting
        with the most recently registered.

        @param iteration_hook: Method to run after each iteration. A valid
                               hook accepts a single argument which is the
                               test object.
        """
        self.after_iteration_hooks.append(iteration_hook)
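
    # A hook is just a callable that takes the test object, for example
    # (illustrative only):
    #
    #   def _log_iteration(test_obj):
    #       logging.info('finished iteration %d of %s',
    #                    test_obj.iteration, test_obj.tagged_testname)
    #
    #   self.register_after_iteration_hook(_log_iteration)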


    def initialize(self):
        pass


    def setup(self):
        pass


    def warmup(self, *args, **dargs):
        pass


    def drop_caches_between_iterations(self):
        if self.job.drop_caches_between_iterations:
            utils.drop_caches()


    def _call_run_once(self, constraints, profile_only,
                       postprocess_profiled_run, args, dargs):
        self.drop_caches_between_iterations()
        # execute iteration hooks
        if not self.job.fast:
            logging.debug('Starting before_iteration_hooks for %s',
                          self.tagged_testname)
            with metrics.SecondsTimer(
                    'chromeos/autotest/job/before_iteration_hook_duration'):
                for hook in self.before_iteration_hooks:
                    hook(self)
            logging.debug('before_iteration_hooks completed')

        finished = False
        try:
            if profile_only:
                if not self.job.profilers.present():
                    self.job.record('WARN', None, None,
                                    'No profilers have been added but '
                                    'profile_only is set - nothing '
                                    'will be run')
                self.run_once_profiling(postprocess_profiled_run,
                                        *args, **dargs)
            else:
                self.before_run_once()
                logging.debug('starting test(run_once()), test details follow'
                              '\n%r', args)
                self.run_once(*args, **dargs)
                logging.debug('The test has completed successfully')
                self.after_run_once()

            self.postprocess_iteration()
            self.analyze_perf_constraints(constraints)
            finished = True
        # Catch and re-raise to let after_iteration_hooks see the exception.
        except Exception as e:
            logging.debug('Test failed due to %s. Exception log follows the '
                          'after_iteration_hooks.', str(e))
            raise
        finally:
            if not finished or not self.job.fast:
                logging.debug('Starting after_iteration_hooks for %s',
                              self.tagged_testname)
                with metrics.SecondsTimer(
                        'chromeos/autotest/job/after_iteration_hook_duration'):
                    for hook in reversed(self.after_iteration_hooks):
                        hook(self)
                logging.debug('after_iteration_hooks completed')


    def execute(self, iterations=None, test_length=None, profile_only=None,
                _get_time=time.time, postprocess_profiled_run=None,
                constraints=(), *args, **dargs):
        """
        This is the basic execute method for the tests inherited from base_test.
        If you want to implement a benchmark test, it's better to implement
        the run_once function, to cope with the profiling infrastructure. For
        other tests, you can just override the default implementation.

        @param test_length: The minimum test length in seconds. We'll run the
            run_once function for a number of times large enough to cover the
            minimum test length.

        @param iterations: The number of times to run the run_once function.
            This parameter is incompatible with test_length and will be
            silently ignored if you specify both.

        @param profile_only: If True, run the iterations with profilers
            enabled. If False, run the iterations normally and add one final
            profiling run if profilers are present. If None, default to the
            value of job.default_profile_only.

        @param _get_time: [time.time] Used for unit test time injection.

        @param postprocess_profiled_run: Run the postprocessing for the
            profiled run.
        """

        # For our special class of tests, the benchmarks, we don't want
        # profilers to run during the test iterations. Let's reserve only
        # the last iteration for profiling, if needed. So let's stop
        # all profilers if they are present and active.
        profilers = self.job.profilers
        if profilers.active():
            profilers.stop(self)
        if profile_only is None:
            profile_only = self.job.default_profile_only
        # If the user called this test in an odd way (specified both iterations
        # and test_length), let's warn them.
        if iterations and test_length:
            logging.debug('Iterations parameter ignored (timed execution)')
        if test_length:
            test_start = _get_time()
            time_elapsed = 0
            timed_counter = 0
            logging.debug('Test started. Specified %d s as the minimum test '
                          'length', test_length)
            while time_elapsed < test_length:
                timed_counter = timed_counter + 1
                if time_elapsed == 0:
                    logging.debug('Executing iteration %d', timed_counter)
                elif time_elapsed > 0:
                    logging.debug('Executing iteration %d, time_elapsed %d s',
                                  timed_counter, time_elapsed)
                self._call_run_once(constraints, profile_only,
                                    postprocess_profiled_run, args, dargs)
                test_iteration_finish = _get_time()
                time_elapsed = test_iteration_finish - test_start
            logging.debug('Test finished after %d iterations, '
                          'time elapsed: %d s', timed_counter, time_elapsed)
        else:
            if iterations is None:
                iterations = 1
            if iterations > 1:
                logging.debug('Test started. Specified %d iterations',
                              iterations)
            for self.iteration in xrange(1, iterations + 1):
                if iterations > 1:
                    logging.debug('Executing iteration %d of %d',
                                  self.iteration, iterations)
                self._call_run_once(constraints, profile_only,
                                    postprocess_profiled_run, args, dargs)

        if not profile_only:
            self.iteration += 1
            self.run_once_profiling(postprocess_profiled_run, *args, **dargs)

        # Do any postprocessing, normally extracting performance keyvals, etc
        self.postprocess()
        self.process_failed_constraints()
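
    # From a control file this is typically reached through job.run_test,
    # e.g. (test name is illustrative):
    #
    #   job.run_test('platform_Example', iterations=3)
    #   job.run_test('platform_Example', test_length=60)  # run for >= 60 s
    #
    # When both are supplied, test_length wins and iterations is ignored.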


    def run_once_profiling(self, postprocess_profiled_run, *args, **dargs):
        profilers = self.job.profilers
        # Do a profiling run if necessary
        if profilers.present():
            self.drop_caches_between_iterations()
            profilers.before_start(self)

            self.before_run_once()
            profilers.start(self)
            logging.debug('Profilers present. Profiling run started')

            try:
                self.run_once(*args, **dargs)

                # Give priority to the run_once() argument over the attribute.
                postprocess_attribute = getattr(self,
                                                'postprocess_profiled_run',
                                                False)

                if (postprocess_profiled_run or
                    (postprocess_profiled_run is None and
                     postprocess_attribute)):
                    self.postprocess_iteration()

            finally:
                profilers.stop(self)
                profilers.report(self)

            self.after_run_once()


    def postprocess(self):
        pass


    def postprocess_iteration(self):
        pass


    def cleanup(self):
        pass


    def before_run_once(self):
        """
        Override in tests that need it, will be called before any run_once()
        call including the profiling run (when it's called before starting
        the profilers).
        """
        pass


    def after_run_once(self):
        """
        Called after every run_once (including from a profiled run when it's
        called after stopping the profilers).
        """
        pass


    @staticmethod
    def _make_writable_to_others(directory):
        mode = os.stat(directory).st_mode
        mode = mode | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH
        os.chmod(directory, mode)


    def _exec(self, args, dargs):
        self.job.logging.tee_redirect_debug_dir(self.debugdir,
                                                log_name=self.tagged_testname)
        try:
            # write out the test attributes into a keyval
            dargs   = dargs.copy()
            run_cleanup = dargs.pop('run_cleanup', self.job.run_test_cleanup)
            keyvals = dargs.pop('test_attributes', {}).copy()
            keyvals['version'] = self.version
            for i, arg in enumerate(args):
                keyvals['param-%d' % i] = repr(arg)
            for name, arg in dargs.iteritems():
                keyvals['param-%s' % name] = repr(arg)
            self.write_test_keyval(keyvals)

            _validate_args(args, dargs, self.initialize, self.setup,
                           self.execute, self.cleanup)

            try:
                # Make resultsdir and tmpdir accessible to everyone. We may
                # output data to these directories as others, e.g., chronos.
                self._make_writable_to_others(self.tmpdir)
                self._make_writable_to_others(self.resultsdir)

                # Initialize:
                _cherry_pick_call(self.initialize, *args, **dargs)

                lockfile = open(os.path.join(self.job.tmpdir, '.testlock'), 'w')
                try:
                    fcntl.flock(lockfile, fcntl.LOCK_EX)
                    # Setup: (compile and install the test, if needed)
                    p_args, p_dargs = _cherry_pick_args(self.setup, args, dargs)
                    utils.update_version(self.srcdir, self.preserve_srcdir,
                                         self.version, self.setup,
                                         *p_args, **p_dargs)
                finally:
                    fcntl.flock(lockfile, fcntl.LOCK_UN)
                    lockfile.close()

                # Execute:
                os.chdir(self.outputdir)

                # call self.warmup cherry picking the arguments it accepts and
                # translate exceptions if needed
                _call_test_function(_cherry_pick_call, self.warmup,
                                    *args, **dargs)

                if hasattr(self, 'run_once'):
                    p_args, p_dargs = _cherry_pick_args(self.run_once,
                                                        args, dargs)
                    # pull in any non-* and non-** args from self.execute
                    for param in _get_nonstar_args(self.execute):
                        if param in dargs:
                            p_dargs[param] = dargs[param]
                else:
                    p_args, p_dargs = _cherry_pick_args(self.execute,
                                                        args, dargs)

                _call_test_function(self.execute, *p_args, **p_dargs)
            except Exception:
                # Save the exception while we run our cleanup() before
                # reraising it, but log it now so the actual time of the error
                # is known.
                exc_info = sys.exc_info()
                logging.warning('The test failed with the following exception',
                                exc_info=True)

                try:
                    try:
                        if run_cleanup:
                            logging.debug('Running cleanup for test.')
                            _cherry_pick_call(self.cleanup, *args, **dargs)
                    except Exception:
                        logging.error('Ignoring exception during cleanup() '
                                      'phase:')
                        traceback.print_exc()
                        logging.error('Now raising the earlier %s error',
                                      exc_info[0])
                    self.crash_handler_report()
                finally:
                    # Raise exception after running cleanup, reporting crash,
                    # and restoring job's logging, even if the first two
                    # actions fail.
                    self.job.logging.restore()
                    try:
                        raise exc_info[0], exc_info[1], exc_info[2]
                    finally:
                        # http://docs.python.org/library/sys.html#sys.exc_info
                        # Be nice and prevent a circular reference.
                        del exc_info
            else:
                try:
                    if run_cleanup:
                        _cherry_pick_call(self.cleanup, *args, **dargs)
                    self.crash_handler_report()
                finally:
                    self.job.logging.restore()
        except error.AutotestError:
            # Pass already-categorized errors on up.
            raise
        except Exception, e:
            # Anything else is an ERROR in our own code, not execute().
            raise error.UnhandledTestError(e)

    def runsubtest(self, url, *args, **dargs):
        """
        Execute another autotest test from inside the current test's scope.

        @param url: Url of the new test.
        @param tag: Tag added to test name.
        @param args: Args for subtest.
        @param dargs: Dictionary with args for subtest.
        @param iterations: Number of subtest iterations.
        @param profile_only: If true execute one profiled run.
        """
        dargs["profile_only"] = dargs.get("profile_only", False)
        test_basepath = self.outputdir[len(self.job.resultdir + "/"):]
        return self.job.run_test(url, master_testpath=test_basepath,
                                 *args, **dargs)
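
    # Example (subtest name is illustrative): run a nested test whose results
    # are placed under this test's output directory:
    #
    #   self.runsubtest('platform_Example', tag='nested', iterations=1)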


def _get_nonstar_args(func):
    """Extract all the (normal) function parameter names.

    Given a function, returns a tuple of parameter names, specifically
    excluding the * and ** parameters, if the function accepts them.

    @param func: A callable that we want to choose arguments for.

    @return: A tuple of parameters accepted by the function.
    """
    return func.func_code.co_varnames[:func.func_code.co_argcount]


def _cherry_pick_args(func, args, dargs):
    """Sanitize positional and keyword arguments before calling a function.

    Given a callable (func), an argument tuple and a dictionary of keyword
    arguments, pick only those arguments which the function is prepared to
    accept and return a new argument tuple and keyword argument dictionary.

    Args:
      func: A callable that we want to choose arguments for.
      args: A tuple of positional arguments to consider passing to func.
      dargs: A dictionary of keyword arguments to consider passing to func.
    Returns:
      A tuple of: (args tuple, keyword arguments dictionary)
    """
    # Cherry pick args:
    if func.func_code.co_flags & 0x04:
        # func accepts *args, so return the entire args.
        p_args = args
    else:
        p_args = ()

    # Cherry pick dargs:
    if func.func_code.co_flags & 0x08:
        # func accepts **dargs, so return the entire dargs.
        p_dargs = dargs
    else:
        # Only return the keyword arguments that func accepts.
        p_dargs = {}
        for param in _get_nonstar_args(func):
            if param in dargs:
                p_dargs[param] = dargs[param]

    return p_args, p_dargs
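
# For instance, given
#
#   def run_once(self, size, verbose=False):
#       ...
#
# _cherry_pick_args(run_once, (), {'size': 10, 'other': 1}) returns
# ((), {'size': 10}): the unknown 'other' keyword is dropped because run_once
# accepts neither *args nor **dargs.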


def _cherry_pick_call(func, *args, **dargs):
    """Cherry picks arguments from args/dargs based on what "func" accepts
    and calls the function with the picked arguments."""
    p_args, p_dargs = _cherry_pick_args(func, args, dargs)
    return func(*p_args, **p_dargs)


def _validate_args(args, dargs, *funcs):
    """Verify that arguments are appropriate for at least one callable.

    Given a list of callables as additional parameters, verify that
    the proposed keyword arguments in dargs will each be accepted by at least
    one of the callables.

    NOTE: args is currently not supported and must be empty.

    Args:
      args: A tuple of proposed positional arguments.
      dargs: A dictionary of proposed keyword arguments.
      *funcs: Callables to be searched for acceptance of args and dargs.
    Raises:
      error.AutotestError: if an arg won't be accepted by any of *funcs.
    """
    all_co_flags = 0
    all_varnames = ()
    for func in funcs:
        all_co_flags |= func.func_code.co_flags
        all_varnames += func.func_code.co_varnames[:func.func_code.co_argcount]

    # Check if the given args belong to at least one of the functions.
    if len(args) > 0:
        # Current implementation doesn't allow the use of args.
        raise error.TestError('Unnamed arguments not accepted. Please '
                              'call job.run_test with named args only')

    # Check if the given dargs belong to at least one of the functions.
    if len(dargs) > 0:
        if not all_co_flags & 0x08:
            # no func accepts **dargs, so:
            for param in dargs:
                if not param in all_varnames:
                    raise error.AutotestError('Unknown parameter: %s' % param)
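
# For example, for the functions passed by _exec() (initialize, setup, execute,
# cleanup), _validate_args((), {'iterations': 2}, ...) passes because execute()
# accepts 'iterations', whereas a misspelled keyword such as 'iteratoins'
# raises AutotestError, unless one of the functions accepts **dargs.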


def _installtest(job, url):
    (group, name) = job.pkgmgr.get_package_name(url, 'test')

    # Bail if the test is already installed
    group_dir = os.path.join(job.testdir, "download", group)
    if os.path.exists(os.path.join(group_dir, name)):
        return (group, name)

    # If the group directory is missing, create it and add an empty
    # __init__.py so that sub-directories are considered for import.
    if not os.path.exists(group_dir):
        os.makedirs(group_dir)
        f = open(os.path.join(group_dir, '__init__.py'), 'w+')
        f.close()

    logging.debug("%s: installing test url=%s", name, url)
    tarball = os.path.basename(url)
    tarball_path = os.path.join(group_dir, tarball)
    test_dir = os.path.join(group_dir, name)
    job.pkgmgr.fetch_pkg(tarball, tarball_path,
                         repo_url=os.path.dirname(url))

    # Create the directory for the test
    if not os.path.exists(test_dir):
        os.mkdir(os.path.join(group_dir, name))

    job.pkgmgr.untar_pkg(tarball_path, test_dir)

    os.remove(tarball_path)

    # For this 'sub-object' to be importable via the name
    # 'group.name' we need to provide an __init__.py,
    # so link the main entry point to this.
    os.symlink(name + '.py', os.path.join(group_dir, name,
                            '__init__.py'))

    # The test is now installed.
    return (group, name)


def _call_test_function(func, *args, **dargs):
    """Calls a test function and translates exceptions so that errors
    inside test code are considered test failures."""
    try:
        return func(*args, **dargs)
    except error.AutotestError:
        raise
    except Exception, e:
        # Other exceptions must be treated as a FAIL when
        # raised during the test functions
        raise error.UnhandledTestFail(e)


def runtest(job, url, tag, args, dargs,
            local_namespace={}, global_namespace={},
            before_test_hook=None, after_test_hook=None,
            before_iteration_hook=None, after_iteration_hook=None):
    local_namespace = local_namespace.copy()
    global_namespace = global_namespace.copy()
    # if this is not a plain test name then download and install the
    # specified test
    if url.endswith('.tar.bz2'):
        (testgroup, testname) = _installtest(job, url)
        bindir = os.path.join(job.testdir, 'download', testgroup, testname)
        importdir = os.path.join(job.testdir, 'download')
        modulename = '%s.%s' % (re.sub('/', '.', testgroup), testname)
        classname = '%s.%s' % (modulename, testname)
        path = testname
    else:
        # If the test is local, it may be under either testdir or site_testdir.
        # Tests in site_testdir override tests defined in testdir
        testname = path = url
        testgroup = ''
        path = re.sub(':', '/', testname)
        modulename = os.path.basename(path)
        classname = '%s.%s' % (modulename, modulename)

        # Try installing the test package
        # The job object may be either a server side job or a client side job.
        # 'install_pkg' method will be present only if it's a client side job.
        if hasattr(job, 'install_pkg'):
            try:
                bindir = os.path.join(job.testdir, testname)
                job.install_pkg(testname, 'test', bindir)
            except error.PackageInstallError:
                # continue as a fall back mechanism and see if the test code
                # already exists on the machine
                pass

        bindir = None
        for dir in [job.testdir, getattr(job, 'site_testdir', None)]:
            if dir is not None and os.path.exists(os.path.join(dir, path)):
                importdir = bindir = os.path.join(dir, path)
        if not bindir:
            raise error.TestError(testname + ': test does not exist')

    subdir = os.path.join(dargs.pop('master_testpath', ""), testname)
    outputdir = os.path.join(job.resultdir, subdir)
    if tag:
        outputdir += '.' + tag

    local_namespace['job'] = job
    local_namespace['bindir'] = bindir
    local_namespace['outputdir'] = outputdir

    sys.path.insert(0, importdir)
    try:
        exec ('import %s' % modulename, local_namespace, global_namespace)
        exec ("mytest = %s(job, bindir, outputdir)" % classname,
              local_namespace, global_namespace)
    finally:
        sys.path.pop(0)

    pwd = os.getcwd()
    os.chdir(outputdir)

    try:
        mytest = global_namespace['mytest']
        mytest.success = False
        if not job.fast and before_test_hook:
            logging.info('Starting before_hook for %s', mytest.tagged_testname)
            with metrics.SecondsTimer(
                    'chromeos/autotest/job/before_hook_duration'):
                before_test_hook(mytest)
            logging.info('before_hook completed')

        # Use the iteration-hook registration methods to register the
        # passed-in hooks.
        if before_iteration_hook:
            mytest.register_before_iteration_hook(before_iteration_hook)
        if after_iteration_hook:
            mytest.register_after_iteration_hook(after_iteration_hook)
        mytest._exec(args, dargs)
        mytest.success = True
    finally:
        os.chdir(pwd)
        if after_test_hook and (not mytest.success or not job.fast):
            logging.info('Starting after_hook for %s', mytest.tagged_testname)
            with metrics.SecondsTimer(
                    'chromeos/autotest/job/after_hook_duration'):
                after_test_hook(mytest)
            logging.info('after_hook completed')

        shutil.rmtree(mytest.tmpdir, ignore_errors=True)
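
# runtest() is normally invoked by the job harness (e.g. via job.run_test)
# rather than directly by tests; a rough sketch of such a call, with the job
# object and hooks supplied by the harness:
#
#   runtest(job, 'platform_Example', tag='smoke', args=(), dargs={})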