      1 #!/usr/bin/python -u
      2 # Copyright 2007-2008 Martin J. Bligh <mbligh (at] google.com>, Google Inc.
      3 # Released under the GPL v2
      4 
      5 """
      6 Run a control file through the server side engine
      7 """
      8 
      9 import ast
     10 import datetime
     11 import getpass
     12 import logging
     13 import os
     14 import re
     15 import signal
     16 import socket
     17 import sys
     18 import traceback
     19 import time
     20 import urllib2
     21 
     22 import common
     23 
     24 from autotest_lib.client.common_lib import control_data
     25 from autotest_lib.client.common_lib import error
     26 from autotest_lib.client.common_lib import global_config
     27 from autotest_lib.client.common_lib import utils
     28 from autotest_lib.client.common_lib.cros.graphite import autotest_es
     29 from autotest_lib.client.common_lib.cros.graphite import autotest_stats
     30 try:
     31     from autotest_lib.puppylab import results_mocker
     32 except ImportError:
     33     results_mocker = None
     34 
     35 _CONFIG = global_config.global_config
     36 
     37 require_atfork = _CONFIG.get_config_value(
     38         'AUTOSERV', 'require_atfork_module', type=bool, default=True)
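        # NOTE: the import guard below re-reads this setting directly (with
        # default=False) rather than using the require_atfork value computed here.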
     39 
     40 
     41 # Number of seconds to wait before returning if testing mode is enabled
     42 TESTING_MODE_SLEEP_SECS = 1
     43 
     44 try:
     45     import atfork
     46     atfork.monkeypatch_os_fork_functions()
     47     import atfork.stdlib_fixer
     48     # Fix the Python standard library for threading+fork safety with its
     49     # internal locks.  http://code.google.com/p/python-atfork/
     50     import warnings
     51     warnings.filterwarnings('ignore', 'logging module already imported')
     52     atfork.stdlib_fixer.fix_logging_module()
     53 except ImportError, e:
     54     from autotest_lib.client.common_lib import global_config
     55     if _CONFIG.get_config_value(
     56             'AUTOSERV', 'require_atfork_module', type=bool, default=False):
     57         print >>sys.stderr, 'Please run utils/build_externals.py'
     58         print e
     59         sys.exit(1)
     60 
     61 from autotest_lib.server import frontend
     62 from autotest_lib.server import server_logging_config
     63 from autotest_lib.server import server_job, utils, autoserv_parser, autotest
     64 from autotest_lib.server import utils as server_utils
     65 from autotest_lib.site_utils import job_directories
     66 from autotest_lib.site_utils import job_overhead
     67 from autotest_lib.site_utils import lxc
     68 from autotest_lib.site_utils import lxc_utils
     69 from autotest_lib.client.common_lib import pidfile, logging_manager
     71 
     72 # Control segment to stage server-side package.
     73 STAGE_SERVER_SIDE_PACKAGE_CONTROL_FILE = server_job._control_segment_path(
     74         'stage_server_side_package')
     75 
     76 # Command line to start servod in a moblab.
     77 START_SERVOD_CMD = 'sudo start servod BOARD=%s PORT=%s'
     78 STOP_SERVOD_CMD = 'sudo stop servod'
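        # For example, START_SERVOD_CMD % ('some_board', 9999) expands to
        # 'sudo start servod BOARD=some_board PORT=9999' (board name illustrative).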
     79 
     80 def log_alarm(signum, frame):
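            """Log receipt of SIGALRM and exit autoserv with status 1."""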
     81     logging.error("Received SIGALRM. Exiting.")
     82     sys.exit(1)
     83 
     84 
     85 def _get_machines(parser):
     86     """Get a list of machine names from command line arg -m or a file.
     87 
     88     @param parser: Parser for the command line arguments.
     89 
     90     @return: A list of machine names from command line arg -m or the
     91              machines file specified in the command line arg -M.
     92     """
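            # For example (hostnames illustrative): '-m host1,host2' yields
            # ['host1', 'host2']; '-M machines.txt' reads one hostname per line,
            # with '#' starting a comment. Duplicates are removed and the result
            # is sorted before being returned.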
     93     if parser.options.machines:
     94         machines = parser.options.machines.replace(',', ' ').strip().split()
     95     else:
     96         machines = []
     97     machines_file = parser.options.machines_file
     98     if machines_file:
     99         machines = []
    100         for m in open(machines_file, 'r').readlines():
    101             # remove comments, spaces
    102             m = re.sub('#.*', '', m).strip()
    103             if m:
    104                 machines.append(m)
    105         logging.debug('Read list of machines from file: %s', machines_file)
    106         logging.debug('Machines: %s', ','.join(machines))
    107 
    108     if machines:
    109         for machine in machines:
    110             if not machine or re.search(r'\s', machine):
    111                 parser.parser.error("Invalid machine: %s" % str(machine))
    112         machines = list(set(machines))
    113         machines.sort()
    114     return machines
    115 
    116 
    117 def _stage_ssp(parser):
    118     """Stage server-side package.
    119 
    120     This function calls a control segment to stage the server-side package
    121     based on the job and the autoserv command line options. The detailed
    122     implementation can differ per host type; currently only CrosHost defines a
    123     stage_server_side_package function.
    124     The control segment returns None if no server-side package is available,
    125     but it may raise an exception if staging fails for reasons other than the
    126     artifact (the server-side package) not being found.
    127 
    128     @param parser: Command line arguments parser passed in the autoserv process.
    129 
    130     @return: url of the staged server-side package. Return None if server-
    131              side package is not found for the build.
    132     """
    133     # If test_source_build is not specified, default to using server-side test
    134     # code from the build specified in --image.
    135     namespace = {'machines': _get_machines(parser),
    136                  'image': (parser.options.test_source_build or
    137                            parser.options.image),}
    138     script_locals = {}
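            # The staging control segment is expected to set 'ssp_url' in
            # script_locals; it is None when no server-side package is available.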
    139     execfile(STAGE_SERVER_SIDE_PACKAGE_CONTROL_FILE, namespace, script_locals)
    140     return script_locals['ssp_url']
    141 
    142 
    143 def _run_with_ssp(job, container_name, job_id, results, parser, ssp_url):
    144     """Run the server job with server-side packaging.
    145 
    146     @param job: The server job object.
    147     @param container_name: Name of the container to run the test.
    148     @param job_id: ID of the test job.
    149     @param results: Folder to store results. This can differ from
    150                     parser.options.results:
    151                     parser.options.results can be set to None so results are
    152                     stored in a temporary folder;
    153                     results can be None when the autoserv run requires no logging.
    154     @param parser: Command line parser that contains the options.
    155     @param ssp_url: url of the staged server-side package.
    156     """
    157     bucket = lxc.ContainerBucket()
    158     control = (parser.args[0] if len(parser.args) > 0 and parser.args[0] != ''
    159                else None)
    160     try:
    161         test_container = bucket.setup_test(container_name, job_id, ssp_url,
    162                                            results, control=control)
    163     except Exception as e:
    164         job.record('FAIL', None, None,
    165                    'Failed to setup container for test: %s. Check logs in '
    166                    'ssp_logs folder for more details.' % e)
    167         raise
    168 
    169     args = sys.argv[:]
    170     args.remove('--require-ssp')
    171     # --parent_job_id is only useful when autoserv runs on the host, not in the
    172     # container. Including this argument causes tests to fail for builds before
    173     # CL 286265 was merged.
    174     if '--parent_job_id' in args:
    175         index = args.index('--parent_job_id')
    176         args.remove('--parent_job_id')
    177         # Remove the actual parent job id in command line arg.
    178         del args[index]
    179 
    180     # A dictionary of paths to replace in the command line. Each key is a path
    181     # to be replaced by the corresponding value (the path inside the container).
    182     paths_to_replace = {}
    183     # Replace the control file path with the one in container.
    184     if control:
    185         container_control_filename = os.path.join(
    186                 lxc.CONTROL_TEMP_PATH, os.path.basename(control))
    187         paths_to_replace[control] = container_control_filename
    188     # Update result directory with the one in container.
    189     if parser.options.results:
    190         container_result_dir = os.path.join(lxc.RESULT_DIR_FMT % job_id)
    191         paths_to_replace[parser.options.results] = container_result_dir
    192     # Update parse_job directory with the one in container. The assumption is
    193     # that the result folder to be parsed is always the same as the results_dir.
    194     if parser.options.parse_job:
    195         container_parse_dir = os.path.join(lxc.RESULT_DIR_FMT % job_id)
    196         paths_to_replace[parser.options.parse_job] = container_parse_dir
    197 
    198     args = [paths_to_replace.get(arg, arg) for arg in args]
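            # For example (path illustrative), if parser.options.results is
            # '/usr/local/autotest/results/123-debug', that argument is rewritten to
            # lxc.RESULT_DIR_FMT % job_id, the results path used inside the container.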
    199 
    200     # Append --use-existing-results: the results directory is already created
    201     # and mounted in the container; the flag avoids raising an exception.
    202     if '--use-existing-results' not in args:
    203         args.append('--use-existing-results')
    204 
    205     # Make sure autoserv running in the container uses a different pid file.
    206     if '--pidfile-label' not in args:
    207         args.extend(['--pidfile-label', 'container_autoserv'])
    208 
    209     cmd_line = ' '.join(["'%s'" % arg if ' ' in arg else arg for arg in args])
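            # Arguments containing spaces are single-quoted so they survive as one
            # argument when the joined command line is run inside the container.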
    210     logging.info('Run command in container: %s', cmd_line)
    211     success = False
    212     try:
    213         test_container.attach_run(cmd_line)
    214         success = True
    215     except Exception as e:
    216         # If the test run inside container fails without generating any log,
    217         # write a message to status.log to help troubleshooting.
    218         debug_files = os.listdir(os.path.join(results, 'debug'))
    219         if not debug_files:
    220             job.record('FAIL', None, None,
    221                        'Failed to run test inside the container: %s. Check '
    222                        'logs in ssp_logs folder for more details.' % e)
    223         raise
    224     finally:
    225         counter_key = '%s.%s' % (lxc.STATS_KEY,
    226                                  'success' if success else 'fail')
    227         autotest_stats.Counter(counter_key).increment()
    228         # Metadata is uploaded separately so it can be posted over HTTP.
    229         metadata = {'drone': socket.gethostname(),
    230                     'job_id': job_id,
    231                     'success': success}
    232         autotest_es.post(use_http=True,
    233                          type_str=lxc.CONTAINER_RUN_TEST_METADB_TYPE,
    234                          metadata=metadata)
    235         test_container.destroy()
    236 
    237 
    238 def correct_results_folder_permission(results):
    239     """Make sure the results folder has the right permission settings.
    240 
    241     For tests running with server-side packaging, the results folder is owned
    242     by root. Ownership must be changed to the user running the autoserv
    243     process so the parsing job can access the results folder.
    244     TODO(dshi): crbug.com/459344 Remove this function when test container can be
    245     unprivileged container.
    246 
    247     @param results: Path to the results folder.
    248 
    249     """
    250     if not results:
    251         return
    252 
    253     try:
    254         utils.run('sudo -n chown -R %s "%s"' % (os.getuid(), results))
    255         utils.run('sudo -n chgrp -R %s "%s"' % (os.getgid(), results))
    256     except error.CmdError as e:
    257         metadata = {'error': str(e),
    258                     'result_folder': results,
    259                     'drone': socket.gethostname()}
    260         autotest_es.post(use_http=True, type_str='correct_results_folder_failure',
    261                          metadata=metadata)
    262         raise
    263 
    264 
    265 def _start_servod(machine):
    266     """Try to start servod in moblab if it is not already running, or restart
    267     it if it is running with a different board or port.
    268 
    269     @param machine: Name of the dut used for test.
    270     """
    271     if not utils.is_moblab():
    272         return
    273 
    274     logging.debug('Trying to start servod.')
    275     try:
    276         afe = frontend.AFE()
    277         board = server_utils.get_board_from_afe(machine, afe)
    278         hosts = afe.get_hosts(hostname=machine)
    279         servo_host = hosts[0].attributes.get('servo_host', None)
    280         servo_port = hosts[0].attributes.get('servo_port', 9999)
    281         if servo_host not in ['localhost', '127.0.0.1']:
    282             logging.warn('Starting servod is aborted. The dut\'s servo_host '
    283                          'attribute is not set to localhost.')
    284             return
    285     except (urllib2.HTTPError, urllib2.URLError):
    286         # Ignore error if RPC failed to get board
    287         logging.error('Failed to get board name from AFE. Starting servod is '
    288                       'aborted.')
    289         return
    290 
    291     try:
    292         pid = utils.run('pgrep servod').stdout
    293         cmd_line = utils.run('ps -fp %s' % pid).stdout
    294         if ('--board %s' % board in cmd_line and
    295             '--port %s' % servo_port in cmd_line):
    296             logging.debug('Servod is already running with given board and port.'
    297                           ' There is no need to restart servod.')
    298             return
    299         logging.debug('Servod is running with different board or port. '
    300                       'Stopping existing servod.')
    301         utils.run(STOP_SERVOD_CMD)
    302     except error.CmdError:
    303         # servod is not running.
    304         pass
    305 
    306     try:
    307         utils.run(START_SERVOD_CMD % (board, servo_port))
    308         logging.debug('Servod is started')
    309     except error.CmdError as e:
    310         logging.error('Servod failed to be started, error: %s', e)
    311 
    312 
    313 def run_autoserv(pid_file_manager, results, parser, ssp_url, use_ssp):
    314     """Run server job with given options.
    315 
    316     @param pid_file_manager: PidFileManager used to monitor the autoserv process
    317     @param results: Folder to store results.
    318     @param parser: Parser for the command line arguments.
    319     @param ssp_url: Url to server-side package.
    320     @param use_ssp: Set to True to run with server-side packaging.
    321     """
    322     if parser.options.warn_no_ssp:
    323         # Post a warning in the log.
    324         logging.warn('Autoserv is required to run with server-side packaging. '
    325                      'However, no drone was found to support server-side '
    326                      'packaging. The test will be executed in a drone without '
    327                      'server-side packaging support.')
    328 
    329     # send stdin to /dev/null
    330     dev_null = os.open(os.devnull, os.O_RDONLY)
    331     os.dup2(dev_null, sys.stdin.fileno())
    332     os.close(dev_null)
    333 
    334     # Create separate process group
    335     os.setpgrp()
    336 
    337     # Container name is predefined so the container can be destroyed in
    338     # handle_sigterm.
    339     job_or_task_id = job_directories.get_job_id_or_task_id(
    340             parser.options.results)
    341     container_name = (lxc.TEST_CONTAINER_NAME_FMT %
    342                       (job_or_task_id, time.time(), os.getpid()))
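            # The container name embeds the job/task id, the current timestamp and
            # the autoserv pid, so handle_sigterm below can find and destroy it.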
    343 
    344     # Implement SIGTERM handler
    345     def handle_sigterm(signum, frame):
    346         logging.debug('Received SIGTERM')
    347         if pid_file_manager:
    348             pid_file_manager.close_file(1, signal.SIGTERM)
    349         logging.debug('Finished writing to pid_file. Killing process.')
    350 
    351         # Update results folder's file permission. This needs to be done ASAP
    352         # before the parsing process tries to access the log.
    353         if use_ssp and results:
    354             correct_results_folder_permission(results)
    355 
    356         # TODO (sbasi) - remove the time.sleep when crbug.com/302815 is solved.
    357         # This sleep allows the pending output to be logged before the kill
    358         # signal is sent.
    359         time.sleep(.1)
    360         if use_ssp:
    361             logging.debug('Destroy container %s before aborting the autoserv '
    362                           'process.', container_name)
    363             metadata = {'drone': socket.gethostname(),
    364                         'job_id': job_or_task_id,
    365                         'container_name': container_name,
    366                         'action': 'abort',
    367                         'success': True}
    368             try:
    369                 bucket = lxc.ContainerBucket()
    370                 container = bucket.get(container_name)
    371                 if container:
    372                     container.destroy()
    373                 else:
    374                     metadata['success'] = False
    375                     metadata['error'] = 'container not found'
    376                     logging.debug('Container %s is not found.', container_name)
    377             except:
    378                 metadata['success'] = False
    379                 metadata['error'] = 'Exception: %s' % sys.exc_info()
    380                 # Handle any exception so the autoserv process can be aborted.
    381                 logging.error('Failed to destroy container %s. Error: %s',
    382                               container_name, sys.exc_info())
    383             autotest_es.post(use_http=True,
    384                              type_str=lxc.CONTAINER_RUN_TEST_METADB_TYPE,
    385                              metadata=metadata)
    386             # Try to correct the result file permission again after the
    387             # container is destroyed, as the container might have created some
    388             # new files in the result folder.
    389             if results:
    390                 correct_results_folder_permission(results)
    391 
    392         os.killpg(os.getpgrp(), signal.SIGKILL)
    393 
    394     # Set signal handler
    395     signal.signal(signal.SIGTERM, handle_sigterm)
    396 
    397     # faulthandler is only needed for debugging in the lab and is not available
    398     # in the chroot as part of VMTest, so wrap the import in a try/except.
    399     try:
    400         import faulthandler
    401         faulthandler.register(signal.SIGTERM, all_threads=True, chain=True)
    402         logging.debug('faulthandler registered on SIGTERM.')
    403     except ImportError:
    404         sys.exc_clear()
    405 
    406     # Ignore SIGTTOU's generated by output from forked children.
    407     signal.signal(signal.SIGTTOU, signal.SIG_IGN)
    408 
    409     # If we receive SIGALRM, be loud about it (log_alarm logs and exits).
    410     signal.signal(signal.SIGALRM, log_alarm)
    411 
    412     # Server-side tests that call shell scripts often depend on $USER being set,
    413     # but depending on how the autotest scheduler is launched it may not be.
    414     os.environ['USER'] = getpass.getuser()
    415 
    416     label = parser.options.label
    417     group_name = parser.options.group_name
    418     user = parser.options.user
    419     client = parser.options.client
    420     server = parser.options.server
    421     install_before = parser.options.install_before
    422     install_after = parser.options.install_after
    423     verify = parser.options.verify
    424     repair = parser.options.repair
    425     cleanup = parser.options.cleanup
    426     provision = parser.options.provision
    427     reset = parser.options.reset
    428     job_labels = parser.options.job_labels
    429     no_tee = parser.options.no_tee
    430     parse_job = parser.options.parse_job
    431     execution_tag = parser.options.execution_tag
    432     if not execution_tag:
    433         execution_tag = parse_job
    434     ssh_user = parser.options.ssh_user
    435     ssh_port = parser.options.ssh_port
    436     ssh_pass = parser.options.ssh_pass
    437     collect_crashinfo = parser.options.collect_crashinfo
    438     control_filename = parser.options.control_filename
    439     test_retry = parser.options.test_retry
    440     verify_job_repo_url = parser.options.verify_job_repo_url
    441     skip_crash_collection = parser.options.skip_crash_collection
    442     ssh_verbosity = int(parser.options.ssh_verbosity)
    443     ssh_options = parser.options.ssh_options
    444     no_use_packaging = parser.options.no_use_packaging
    445     host_attributes = parser.options.host_attributes
    446     in_lab = bool(parser.options.lab)
    447 
    448     # A test can't be both a client-side and a server-side test.
    449     if client and server:
    450         parser.parser.error("Cannot specify a test as both server and client!")
    451 
    452     if provision and client:
    453         parser.parser.error("Cannot specify provisioning and client!")
    454 
    455     is_special_task = (verify or repair or cleanup or collect_crashinfo or
    456                        provision or reset)
    457     if len(parser.args) < 1 and not is_special_task:
    458         parser.parser.error("Missing argument: control file")
    459 
    460     if ssh_verbosity > 0:
    461         # ssh_verbosity is an integer between 0 and 3, inclusive
    462         ssh_verbosity_flag = '-' + 'v' * ssh_verbosity
    463     else:
    464         ssh_verbosity_flag = ''
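            # For example, ssh_verbosity == 2 yields '-vv'; the flag is passed to
            # the server_job constructor below.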
    465 
    466     # We have a control file unless it's just a verify/repair/cleanup job
    467     if len(parser.args) > 0:
    468         control = parser.args[0]
    469     else:
    470         control = None
    471 
    472     machines = _get_machines(parser)
    473     if group_name and len(machines) < 2:
    474         parser.parser.error('-G %r may only be supplied with more than one '
    475                             'machine.' % group_name)
    476 
    477     kwargs = {'group_name': group_name, 'tag': execution_tag,
    478               'disable_sysinfo': parser.options.disable_sysinfo}
    479     if parser.options.parent_job_id:
    480         kwargs['parent_job_id'] = int(parser.options.parent_job_id)
    481     if control_filename:
    482         kwargs['control_filename'] = control_filename
    483     if host_attributes:
    484         kwargs['host_attributes'] = host_attributes
    485     kwargs['in_lab'] = in_lab
    486     job = server_job.server_job(control, parser.args[1:], results, label,
    487                                 user, machines, client, parse_job,
    488                                 ssh_user, ssh_port, ssh_pass,
    489                                 ssh_verbosity_flag, ssh_options,
    490                                 test_retry, **kwargs)
    491 
    492     job.logging.start_logging()
    493     job.init_parser()
    494 
    495     # perform checks
    496     job.precheck()
    497 
    498     # run the job
    499     exit_code = 0
    500     auto_start_servod = _CONFIG.get_config_value(
    501             'AUTOSERV', 'auto_start_servod', type=bool, default=False)
    502     try:
    503         try:
    504             if repair:
    505                 if auto_start_servod and len(machines) == 1:
    506                     _start_servod(machines[0])
    507                 job.repair(job_labels)
    508             elif verify:
    509                 job.verify(job_labels)
    510             elif provision:
    511                 job.provision(job_labels)
    512             elif reset:
    513                 job.reset(job_labels)
    514             elif cleanup:
    515                 job.cleanup(job_labels)
    516             else:
    517                 if auto_start_servod and len(machines) == 1:
    518                     _start_servod(machines[0])
    519                 if use_ssp:
    520                     try:
    521                         _run_with_ssp(job, container_name, job_or_task_id,
    522                                       results, parser, ssp_url)
    523                     finally:
    524                         # Update the ownership of files in result folder.
    525                         correct_results_folder_permission(results)
    526                 else:
    527                     job.run(install_before, install_after,
    528                             verify_job_repo_url=verify_job_repo_url,
    529                             only_collect_crashinfo=collect_crashinfo,
    530                             skip_crash_collection=skip_crash_collection,
    531                             job_labels=job_labels,
    532                             use_packaging=(not no_use_packaging))
    533         finally:
    534             while job.hosts:
    535                 host = job.hosts.pop()
    536                 host.close()
    537     except:
    538         exit_code = 1
    539         traceback.print_exc()
    540 
    541     if pid_file_manager:
    542         pid_file_manager.num_tests_failed = job.num_tests_failed
    543         pid_file_manager.close_file(exit_code)
    544     job.cleanup_parser()
    545 
    546     sys.exit(exit_code)
    547 
    548 
    549 def record_autoserv(options, duration_secs):
    550     """Record autoserv end-to-end time in metadata db.
    551 
    552     @param options: parser options.
    553     @param duration_secs: How long autoserv has taken, in secs.
    554     """
    555     # Get machine hostname
    556     machines = options.machines.replace(
    557             ',', ' ').strip().split() if options.machines else []
    558     num_machines = len(machines)
    559     if num_machines > 1:
    560         # Skip the case where atomic group is used.
    561         return
    562     elif num_machines == 0:
    563         machines.append('hostless')
    564 
    565     # Determine the status that will be reported.
    566     s = job_overhead.STATUS
    567     task_mapping = {
    568             'reset': s.RESETTING, 'verify': s.VERIFYING,
    569             'provision': s.PROVISIONING, 'repair': s.REPAIRING,
    570             'cleanup': s.CLEANING, 'collect_crashinfo': s.GATHERING}
    571     match = filter(lambda task: getattr(options, task, False) == True,
    572                    task_mapping)
    573     status = task_mapping[match[0]] if match else s.RUNNING
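            # For example, if options.repair is set the reported status is
            # s.REPAIRING; with no matching special-task option it is s.RUNNING.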
    574     is_special_task = status not in [s.RUNNING, s.GATHERING]
    575     job_or_task_id = job_directories.get_job_id_or_task_id(options.results)
    576     job_overhead.record_state_duration(
    577             job_or_task_id, machines[0], status, duration_secs,
    578             is_special_task=is_special_task)
    579 
    580 
    581 def main():
    582     start_time = datetime.datetime.now()
    583     # Whitelist of tests with run-time measurement enabled.
    584     measure_run_time_tests_names = _CONFIG.get_config_value(
    585             'AUTOSERV', 'measure_run_time_tests', type=str)
    586     if measure_run_time_tests_names:
    587         measure_run_time_tests = [t.strip() for t in
    588                                   measure_run_time_tests_names.split(',')]
    589     else:
    590         measure_run_time_tests = []
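            # For example (test names illustrative), a config value of
            # 'dummy_Pass,login_LoginSuccess' yields
            # ['dummy_Pass', 'login_LoginSuccess'].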
    591     # grab the parser
    592     parser = autoserv_parser.autoserv_parser
    593     parser.parse_args()
    594 
    595     if len(sys.argv) == 1:
    596         parser.parser.print_help()
    597         sys.exit(1)
    598 
    599     # If the job is required to run with a server-side package, try to stage the
    600     # server-side package first. If staging fails because the autotest server
    601     # package does not exist, fall back to running the job without server-side
    602     # packaging. If the warn_no_ssp option is specified, autoserv is running in
    603     # a drone that does not support SSP, so there is no need to stage the
    604     # server-side package.
    605     ssp_url = None
    606     ssp_url_warning = False
    607     if (not parser.options.warn_no_ssp and parser.options.require_ssp):
    608         ssp_url = _stage_ssp(parser)
    609         # The build does not have an autotest server package. Fall back to not
    610         # using the server-side package. Logging of this warning is postponed
    611         # until logging is set up.
    612         ssp_url_warning = not ssp_url
    613 
    614     if parser.options.no_logging:
    615         results = None
    616     else:
    617         results = parser.options.results
    618         if not results:
    619             results = 'results.' + time.strftime('%Y-%m-%d-%H.%M.%S')
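                    # e.g. 'results.2015-08-01-12.30.00' (timestamp illustrative).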
    620         results = os.path.abspath(results)
    621         resultdir_exists = False
    622         for filename in ('control.srv', 'status.log', '.autoserv_execute'):
    623             if os.path.exists(os.path.join(results, filename)):
    624                 resultdir_exists = True
    625         if not parser.options.use_existing_results and resultdir_exists:
    626             err_msg = "Error: results directory already exists: %s\n" % results
    627             sys.stderr.write(err_msg)
    628             sys.exit(1)
    629 
    630         # Now that we have verified there is no leftover results dir from
    631         # previous jobs, create the results dir, since the logging system needs
    632         # to create the log file in there.
    633         if not os.path.isdir(results):
    634             os.makedirs(results)
    635 
    636     # Server-side packaging will only be used if it's required and the package
    637     # is available. If warn_no_ssp is specified, it means that autoserv is
    638     # running in a drone that does not support SSP and a warning will be
    639     # logged. Therefore, it should not run with SSP.
    640     use_ssp = (not parser.options.warn_no_ssp and parser.options.require_ssp
    641                and ssp_url)
    642     if use_ssp:
    643         log_dir = os.path.join(results, 'ssp_logs') if results else None
    644         if log_dir and not os.path.exists(log_dir):
    645             os.makedirs(log_dir)
    646     else:
    647         log_dir = results
    648 
    649     logging_manager.configure_logging(
    650             server_logging_config.ServerLoggingConfig(),
    651             results_dir=log_dir,
    652             use_console=not parser.options.no_tee,
    653             verbose=parser.options.verbose,
    654             no_console_prefix=parser.options.no_console_prefix)
    655 
    656     if ssp_url_warning:
    657         logging.warn(
    658                 'Autoserv is required to run with server-side packaging. '
    659                 'However, no server-side package can be found based on '
    660                 '`--image`, host attribute job_repo_url or host label of '
    661                 'cros-version. The test will be executed without '
    662                 'server-side packaging support.')
    663 
    664     if results:
    665         logging.info("Results placed in %s", results)
    666 
    667         # Wait until now to perform this check so it gets properly logged.
    668         if (parser.options.use_existing_results and not resultdir_exists and
    669             not utils.is_in_container()):
    670             logging.error("No existing results directory found: %s", results)
    671             sys.exit(1)
    672 
    673     logging.debug('autoserv is running in drone %s.', socket.gethostname())
    674     logging.debug('autoserv command was: %s', ' '.join(sys.argv))
    675 
    676     if parser.options.write_pidfile and results:
    677         pid_file_manager = pidfile.PidFileManager(parser.options.pidfile_label,
    678                                                   results)
    679         pid_file_manager.open_file()
    680     else:
    681         pid_file_manager = None
    682 
    683     autotest.BaseAutotest.set_install_in_tmpdir(
    684         parser.options.install_in_tmpdir)
    685 
    686     timer = None
    687     try:
    688         # Take the first argument as the control file name and get the test
    689         # name from the control file. If the test name is in the list of tests
    690         # with run-time measurement enabled, start a timer to begin measurement.
    691         if (len(parser.args) > 0 and parser.args[0] != '' and
    692             parser.options.machines):
    693             try:
    694                 test_name = control_data.parse_control(parser.args[0],
    695                                                        raise_warnings=True).name
    696             except control_data.ControlVariableException:
    697                 logging.debug('Failed to retrieve test name from control file.')
    698                 test_name = None
    699             if test_name in measure_run_time_tests:
    700                 machines = parser.options.machines.replace(',', ' '
    701                                                            ).strip().split()
    702                 try:
    703                     afe = frontend.AFE()
    704                     board = server_utils.get_board_from_afe(machines[0], afe)
    705                     timer = autotest_stats.Timer('autoserv_run_time.%s.%s' %
    706                                                  (board, test_name))
    707                     timer.start()
    708                 except (urllib2.HTTPError, urllib2.URLError):
    709                     # Ignore error if RPC failed to get board
    710                     pass
    711     except control_data.ControlVariableException as e:
    712         logging.error(str(e))
    713     exit_code = 0
    714     # TODO(beeps): Extend this to cover different failure modes.
    715     # Testing exceptions are matched against labels sent to autoserv. E.g.,
    716     # to allow only the hostless job to run, specify
    717     # testing_exceptions: test_suite in the shadow_config. To allow both
    718     # the hostless job and dummy_Pass to run, specify
    719     # testing_exceptions: test_suite,dummy_Pass. You can figure out
    720     # what label autoserv is invoked with by looking through the logs of a test
    721     # for the autoserv command's -l option.
    722     testing_exceptions = _CONFIG.get_config_value(
    723             'AUTOSERV', 'testing_exceptions', type=list, default=[])
    724     test_mode = _CONFIG.get_config_value(
    725             'AUTOSERV', 'testing_mode', type=bool, default=False)
    726     test_mode = (results_mocker and test_mode and not
    727                  any([ex in parser.options.label
    728                       for ex in testing_exceptions]))
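            # Per the comment above: with testing_exceptions set to
            # 'test_suite,dummy_Pass', any job whose -l label contains one of those
            # names runs normally even when testing_mode is enabled.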
    729     is_task = (parser.options.verify or parser.options.repair or
    730                parser.options.provision or parser.options.reset or
    731                parser.options.cleanup or parser.options.collect_crashinfo)
    732     try:
    733         try:
    734             if test_mode:
    735                 # The parser doesn't run on tasks anyway, so we can just return
    736                 # happy signals without faking results.
    737                 if not is_task:
    738                     machine = parser.options.results.split('/')[-1]
    739 
    740                     # TODO(beeps): The proper way to do this would be to
    741                     # refactor job creation so we can invoke job.record
    742                     # directly. To do that one needs to pipe the test_name
    743                     # through run_autoserv and bail just before invoking
    744                     # the server job. See the comment in
    745                     # puppylab/results_mocker for more context.
    746                     results_mocker.ResultsMocker(
    747                             test_name if test_name else 'unknown-test',
    748                             parser.options.results, machine
    749                             ).mock_results()
    750                 return
    751             else:
    752                 run_autoserv(pid_file_manager, results, parser, ssp_url,
    753                              use_ssp)
    754         except SystemExit as e:
    755             exit_code = e.code
    756             if exit_code:
    757                 logging.exception(e)
    758         except Exception as e:
    759             # If we don't know what happened, we'll classify it as
    760             # an 'abort' and return 1.
    761             logging.exception(e)
    762             exit_code = 1
    763     finally:
    764         if pid_file_manager:
    765             pid_file_manager.close_file(exit_code)
    766         if timer:
    767             timer.stop()
    768         # Record the autoserv duration time. Must be called
    769         # just before the system exits to ensure accuracy.
    770         duration_secs = (datetime.datetime.now() - start_time).total_seconds()
    771         record_autoserv(parser.options, duration_secs)
    772     sys.exit(exit_code)
    773 
    774 
    775 if __name__ == '__main__':
    776     main()
    777