# setup (you can ignore this)
# ###########################

# a bit of setup to allow overriding rpc_interface with an RPC proxy
# (to use RPC, we would say
#   import rpc_client_lib
#   rpc_interface = rpc_client_lib.get_proxy(
#                             'http://hostname:8000/afe/server/noauth/rpc/')
# )
>>> if 'rpc_interface' not in globals():
...   from autotest_lib.frontend.afe import rpc_interface, models
...   from autotest_lib.frontend import thread_local
...   # set up a user for us to "login" as
...   user = models.User(login='debug_user')
...   user.access_level = 100
...   user.save()
...   thread_local.set_user(user)
...
>>> from autotest_lib.frontend.afe import model_logic

# get directory of this test file; we'll need it later
>>> import common
>>> from autotest_lib.frontend.afe import test
>>> import os, datetime
>>> test_path = os.path.join(os.path.dirname(test.__file__),
...                                          'doctests')
>>> test_path = os.path.abspath(test_path)

# disable logging
>>> from autotest_lib.client.common_lib import logging_manager
>>> logging_manager.logger.setLevel(100)

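# if the global config names a default drone set, create it here so that the
# jobs created below can reference it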
>>> drone_set = models.DroneSet.default_drone_set_name()
>>> if drone_set:
...     _ = models.DroneSet.objects.create(name=drone_set)

# mock up tko rpc_interface
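# (the stub below makes tko_rpc_interface.get_status_counts return None, so
# these doctests never need a live TKO backend)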
>>> from autotest_lib.client.common_lib.test_utils import mock
>>> mock.mock_god().stub_function_to_return(rpc_interface.tko_rpc_interface,
...                                         'get_status_counts',
...                                         None)

# basic interface test
# ####################

# echo a string
>>> rpc_interface.echo('test string to echo')
'test string to echo'

# basic object management
# #######################

# create a label
>>> rpc_interface.add_label(name='test_label')
1

# we can modify the label by referencing its ID...
>>> rpc_interface.modify_label(1, kernel_config='/my/kernel/config')

# ...or by referencing its name
>>> rpc_interface.modify_label('test_label', platform=True)

# we use get_labels to retrieve object data
>>> data = rpc_interface.get_labels(name='test_label')
>>> data == [{'id': 1,
...           'name': 'test_label',
...           'platform': 1,
...           'kernel_config': '/my/kernel/config',
...           'only_if_needed' : False,
...           'invalid': 0,
...           'atomic_group': None}]
True

# get_labels returns multiple matches as a list of dictionaries
>>> rpc_interface.add_label(name='label1', platform=False)
2
>>> rpc_interface.add_label(name='label2', platform=True)
3
>>> rpc_interface.add_label(name='label3', platform=False)
4
>>> data = rpc_interface.get_labels(platform=False)
>>> data == [{'id': 2, 'name': 'label1', 'platform': 0, 'kernel_config': '',
...           'only_if_needed': False, 'invalid': 0, 'atomic_group': None},
...          {'id': 4, 'name': 'label3', 'platform': 0, 'kernel_config': '',
...           'only_if_needed': False, 'invalid': 0, 'atomic_group': None}]
True

# delete_label takes an ID or a name as well
>>> rpc_interface.delete_label(3)
>>> rpc_interface.get_labels(name='label2')
[]
>>> rpc_interface.delete_label('test_label')
>>> rpc_interface.delete_label('label1')
>>> rpc_interface.delete_label('label3')
>>> rpc_interface.get_labels()
[]

# all the add*, modify*, delete*, and get* methods work the same way
# hosts...
>>> rpc_interface.add_host(hostname='ipaj1', locked=True, lock_reason='Locked device on creation')
1
>>> data = rpc_interface.get_hosts()

# delete the lock_time field, since that can't be reliably checked
>>> del data[0]['lock_time']
>>> data == [{'id': 1,
...           'hostname': 'ipaj1',
...           'locked': 1,
...           'synch_id': None,
...           'status': 'Ready',
...           'labels': [],
...           'atomic_group': None,
...           'acls': ['Everyone'],
...           'platform': None,
...           'attributes': {},
...           'invalid': 0,
...           'protection': 'No protection',
...           'locked_by': 'debug_user',
...           'dirty': True,
...           'leased': 1,
...           'shard': None,
...           'lock_reason': 'Locked device on creation'}]
True
>>> rpc_interface.modify_host(id='ipaj1', status='Hello')
Traceback (most recent call last):
ValidationError: {'status': 'Host status can not be modified by the frontend.'}
>>> rpc_interface.modify_host(id='ipaj1', hostname='ipaj1000')
>>> rpc_interface.modify_hosts(
...     host_filter_data={'hostname': 'ipaj1000'},
...     update_data={'locked': False})
>>> data = rpc_interface.get_hosts()
>>> bool(data[0]['locked'])
False

# test already locked/unlocked failures
>>> rpc_interface.modify_host(id='ipaj1000', locked=False)
Traceback (most recent call last):
ValidationError: {'locked': u'Host ipaj1000 already unlocked.'}
>>> rpc_interface.modify_host(id='ipaj1000', locked=True, lock_reason='Locking a locked device')
>>> try:
...     rpc_interface.modify_host(id='ipaj1000', locked=True)
... except model_logic.ValidationError, err:
...     pass
>>> assert ('locked' in err.message_dict
...         and err.message_dict['locked'].startswith('Host ipaj1000 already locked'))
>>> rpc_interface.delete_host(id='ipaj1000')
>>> rpc_interface.get_hosts() == []
True

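# validation failures surface as model_logic.ValidationError carrying a
# message_dict keyed by field name; a client wrapper might report them
# generically, e.g. (illustrative sketch with a made-up host name, not
# executed here):
#   try:
#       rpc_interface.modify_host(id='some_host', locked=True)
#   except model_logic.ValidationError, err:
#       for field, message in err.message_dict.iteritems():
#           print '%s: %s' % (field, message)
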
# tests...
>>> rpc_interface.add_test(name='sleeptest', test_type='Client', author='Test',
...                        description='Sleep Test', test_time=1,
...                        test_category='Functional',
...                        test_class='Kernel', path='sleeptest')
1
>>> rpc_interface.modify_test('sleeptest', path='/my/path')
>>> data = rpc_interface.get_tests()
>>> data == [{'id': 1,
...           'name': 'sleeptest',
...           'author': 'Test',
...           'description': 'Sleep Test',
...           'dependencies': '',
...           'experimental': 1,
...           'sync_count': 1,
...           'test_type': 'Client',
...           'test_class': 'Kernel',
...           'test_time': 'SHORT',
...           'run_verify': 0,
...           'run_reset': 1,
...           'test_category': 'Functional',
...           'path': '/my/path',
...           'test_retry': 0}]
True
>>> rpc_interface.delete_test('sleeptest')
>>> rpc_interface.get_tests() == []
True

# profilers...
>>> rpc_interface.add_profiler(name='oprofile')
1
>>> rpc_interface.modify_profiler('oprofile', description='Oh profile!')
>>> data = rpc_interface.get_profilers()
>>> data == [{'id': 1,
...           'name': 'oprofile',
...           'description': 'Oh profile!'}]
True
>>> rpc_interface.delete_profiler('oprofile')
>>> rpc_interface.get_profilers() == []
True


# users...
>>> rpc_interface.add_user(login='showard')
2
>>> rpc_interface.modify_user('showard', access_level=1)
>>> data = rpc_interface.get_users(login='showard')
>>> data == [{'id': 2,
...           'login': 'showard',
...           'access_level': 1,
...           'reboot_before': 'If dirty',
...           'reboot_after': 'Never',
...           'drone_set': None,
...           'show_experimental': False}]
True
>>> rpc_interface.delete_user('showard')
>>> rpc_interface.get_users(login='showard') == []
True

# acl groups...
# 1 ACL group already exists, named "Everyone" (ID 1)
>>> rpc_interface.add_acl_group(name='my_group')
2
>>> rpc_interface.modify_acl_group('my_group', description='my new acl group')
>>> data = rpc_interface.get_acl_groups(name='my_group')
>>> data == [{'id': 2,
...           'name': 'my_group',
...           'description': 'my new acl group',
...           'users': ['debug_user'],
...           'hosts': []}]
True
>>> rpc_interface.delete_acl_group('my_group')
>>> data = rpc_interface.get_acl_groups()
>>> data == [{'id': 1,
...           'name': 'Everyone',
...           'description': '',
...           'users': ['debug_user'],
...           'hosts': []}]
True


# managing many-to-many relationships
# ###################################

# first, create some hosts and labels to play around with
>>> rpc_interface.add_host(hostname='host1')
2
>>> rpc_interface.add_host(hostname='host2')
3
>>> rpc_interface.add_label(name='label1')
2
>>> rpc_interface.add_label(name='label2', platform=True)
3

# add hosts to labels
>>> rpc_interface.host_add_labels(id='host1', labels=['label1'])
>>> rpc_interface.host_add_labels(id='host2', labels=['label1', 'label2'])

# check labels for hosts
>>> data = rpc_interface.get_hosts(hostname='host1')
>>> data[0]['labels']
[u'label1']
>>> data = rpc_interface.get_hosts(hostname='host2')
>>> data[0]['labels']
[u'label1', u'label2']
>>> data[0]['platform']
u'label2'

# check host lists for labels -- use double underscore to specify fields of
# related objects
>>> data = rpc_interface.get_hosts(labels__name='label1')
>>> [host['hostname'] for host in data]
[u'host1', u'host2']
>>> data = rpc_interface.get_hosts(labels__name='label2')
>>> [host['hostname'] for host in data]
[u'host2']

# remove a host from a label
>>> rpc_interface.host_remove_labels(id='host2', labels=['label2'])
>>> data = rpc_interface.get_hosts(hostname='host1')
>>> data[0]['labels']
[u'label1']
>>> rpc_interface.get_hosts(labels__name='label2')
[]

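# related-object filters are not the only option; a multiple_labels filter
# (used later in this file) matches against several labels at once, e.g.
# (illustrative sketch only, not executed here):
#   rpc_interface.get_hosts(multiple_labels=['label1', 'label2'])
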
# Cleanup
>>> rpc_interface.host_remove_labels(id='host2', labels=['label1'])
>>> rpc_interface.host_remove_labels(id='host1', labels=['label1'])


# the label-centric variants of the same calls, used by the new CLI
# add hosts to labels
>>> rpc_interface.label_add_hosts(id='label1', hosts=['host1'])
>>> rpc_interface.label_add_hosts(id='label2', hosts=['host1', 'host2'])

# check labels for hosts
>>> data = rpc_interface.get_hosts(hostname='host1')
>>> data[0]['labels']
[u'label1', u'label2']
>>> data = rpc_interface.get_hosts(hostname='host2')
>>> data[0]['labels']
[u'label2']
>>> data[0]['platform']
u'label2'

# check host lists for labels -- use double underscore to specify fields of
# related objects
>>> data = rpc_interface.get_hosts(labels__name='label1')
>>> [host['hostname'] for host in data]
[u'host1']
>>> data = rpc_interface.get_hosts(labels__name='label2')
>>> [host['hostname'] for host in data]
[u'host1', u'host2']

# remove a host from a label
>>> rpc_interface.label_remove_hosts(id='label2', hosts=['host2'])
>>> data = rpc_interface.get_hosts(hostname='host1')
>>> data[0]['labels']
[u'label1', u'label2']
>>> data = rpc_interface.get_hosts(labels__name='label2')
>>> [host['hostname'] for host in data]
[u'host1']

# Remove multiple hosts from a label
>>> rpc_interface.label_add_hosts(id='label2', hosts=['host2'])
>>> data = rpc_interface.get_hosts(labels__name='label2')
>>> [host['hostname'] for host in data]
[u'host1', u'host2']
>>> rpc_interface.label_remove_hosts(id='label2', hosts=['host2', 'host1'])
>>> rpc_interface.get_hosts(labels__name='label2')
[]


# ACL group relationships work similarly
# note that all users are members of 'Everyone' by default, and that hosts
# are automatically made members of 'Everyone' only when they belong to no
# other group
>>> data = rpc_interface.get_acl_groups(hosts__hostname='host1')
>>> [acl_group['name'] for acl_group in data]
[u'Everyone']

>>> rpc_interface.add_user(login='showard', access_level=0)
2
>>> rpc_interface.add_acl_group(name='my_group')
2

>>> rpc_interface.acl_group_add_users('my_group', ['showard'])
>>> rpc_interface.acl_group_add_hosts('my_group', ['host1'])
>>> data = rpc_interface.get_acl_groups(name='my_group')
>>> data[0]['users']
[u'debug_user', u'showard']
>>> data[0]['hosts']
[u'host1']
>>> data = rpc_interface.get_acl_groups(users__login='showard')
>>> [acl_group['name'] for acl_group in data]
[u'Everyone', u'my_group']

# note that the host has been automatically removed from 'Everyone'
>>> data = rpc_interface.get_acl_groups(hosts__hostname='host1')
>>> [acl_group['name'] for acl_group in data]
[u'my_group']

>>> rpc_interface.acl_group_remove_users('my_group', ['showard'])
>>> rpc_interface.acl_group_remove_hosts('my_group', ['host1'])
>>> data = rpc_interface.get_acl_groups(name='my_group')
>>> data[0]['users'], data[0]['hosts']
([u'debug_user'], [])
>>> data = rpc_interface.get_acl_groups(users__login='showard')
>>> [acl_group['name'] for acl_group in data]
[u'Everyone']

# note that the host has been automatically added back to 'Everyone'
>>> data = rpc_interface.get_acl_groups(hosts__hostname='host1')
>>> [acl_group['name'] for acl_group in data]
[u'Everyone']


# host attributes

>>> rpc_interface.set_host_attribute('color', 'red', hostname='host1')
>>> data = rpc_interface.get_hosts(hostname='host1')
>>> data[0]['attributes']
{u'color': u'red'}

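# setting an attribute's value to None clears it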
>>> rpc_interface.set_host_attribute('color', None, hostname='host1')
>>> data = rpc_interface.get_hosts(hostname='host1')
>>> data[0]['attributes']
{}


# host bulk modify
# ################

>>> rpc_interface.modify_hosts(
...     host_filter_data={'hostname__in': ['host1', 'host2']},
...     update_data={'locked': True, 'lock_reason': 'Locked for testing'})
>>> data = rpc_interface.get_hosts(hostname__in=['host1', 'host2'])

>>> data[0]['locked']
True
>>> data[1]['locked']
True

>>> rpc_interface.modify_hosts(
...     host_filter_data={'id': 2},
...     update_data={'locked': False})
>>> data = rpc_interface.get_hosts(hostname__in=['host1', 'host2'])

>>> data[0]['locked']
False
>>> data[1]['locked']
True
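
# host_filter_data accepts the same keyword filters as get_hosts(), so a bulk
# update can select hosts by related fields too; for example (illustrative
# sketch only, not executed here):
#   rpc_interface.modify_hosts(
#       host_filter_data={'labels__name': 'label1'},
#       update_data={'locked': True, 'lock_reason': 'maintenance'})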


# job management
# ##############

# note that job functions require job IDs to identify jobs, since job names are
# not unique

# add some entries to play with
>>> rpc_interface.add_label(name='my_label', kernel_config='my_kernel_config')
5
>>> test_control_path = os.path.join(test_path, 'test.control')
>>> rpc_interface.add_test(name='sleeptest', test_type='Client', author='Test',
...                        test_category='Test',
...                        test_class='Kernel', path=test_control_path)
1
>>> test_control_path = os.path.join(test_path, 'test.control.2')
>>> rpc_interface.add_test(name='my_test', test_type='Client', author='Test',
...                        test_category='Test',
...                        test_class='Kernel', path=test_control_path)
2
>>> rpc_interface.add_host(hostname='my_label_host1')
4
>>> rpc_interface.add_host(hostname='my_label_host2')
5
>>> rpc_interface.label_add_hosts(id='my_label', hosts=['my_label_host1', 'my_label_host2'])

# generate a control file
>>> cf_info = rpc_interface.generate_control_file(
...     tests=['sleeptest', 'my_test'],
...     kernel=[{'version': '2.6.18'}, {'version': '2.6.18-blah.rpm'},
...             {'version': '2.6.26', 'cmdline': 'foo bar'}],
...     label='my_label')
>>> print cf_info['control_file'] #doctest: +NORMALIZE_WHITESPACE
kernel_list = [{'version': '2.6.18', 'config_file': u'my_kernel_config'}, {'version': '2.6.18-blah.rpm', 'config_file': None}, {'cmdline': 'foo bar', 'version': '2.6.26', 'config_file': u'my_kernel_config'}]
def step_init():
    for kernel_info in kernel_list:
        job.next_step(boot_kernel, kernel_info)
        job.next_step(step_test, kernel_info['version'])
    if len(kernel_list) > 1:
        job.use_sequence_number = True  # include run numbers in directory names
def boot_kernel(kernel_info):
    # remove kernels (and associated data) not referenced by the bootloader
    for host in job.hosts:
        host.cleanup_kernels()
    testkernel = job.kernel(kernel_info['version'])
    if kernel_info['config_file']:
        testkernel.config(kernel_info['config_file'])
    testkernel.build()
    testkernel.install()
    cmdline = ' '.join((kernel_info.get('cmdline', ''), ''))
    testkernel.boot(args=cmdline)
def step_test(kernel_version):
    global kernel
    kernel = kernel_version  # Set the global in case anyone is using it.
    if len(kernel_list) > 1:
        # this is local to a machine, safe to assume there's only one host
        host, = job.hosts
        job.automatic_test_tag = host.get_kernel_ver()
    job.next_step('step0')
    job.next_step('step1')
def step0():
    job.run_test('testname')
def step1():
    job.run_test('testname')
>>> cf_info['is_server'], cf_info['synch_count'], cf_info['dependencies']
(False, 1, [])

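# besides the control file text itself, cf_info carries hints for creating the
# job; a caller would typically map them roughly like this (illustrative
# sketch with a made-up job name, not executed here):
#   control_type = 'Server' if cf_info['is_server'] else 'Client'
#   rpc_interface.create_job(name='generated_job',
#                            priority=10,
#                            control_file=cf_info['control_file'],
#                            control_type=control_type,
#                            synch_count=cf_info['synch_count'],
#                            dependencies=cf_info['dependencies'],
#                            hosts=['host1'])
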
# generate a control file from existing body text.
>>> cf_info_pi = rpc_interface.generate_control_file(
...     kernel=[{'version': '3.1.41'}], label='my_label',
...     client_control_file='print "Hi"\n')
>>> print cf_info_pi['control_file'] #doctest: +NORMALIZE_WHITESPACE
kernel_list = [{'version': '3.1.41', 'config_file': u'my_kernel_config'}]
def step_init():
    for kernel_info in kernel_list:
        job.next_step(boot_kernel, kernel_info)
        job.next_step(step_test, kernel_info['version'])
    if len(kernel_list) > 1:
        job.use_sequence_number = True  # include run numbers in directory names
def boot_kernel(kernel_info):
    # remove kernels (and associated data) not referenced by the bootloader
    for host in job.hosts:
        host.cleanup_kernels()
    testkernel = job.kernel(kernel_info['version'])
    if kernel_info['config_file']:
        testkernel.config(kernel_info['config_file'])
    testkernel.build()
    testkernel.install()
    cmdline = ' '.join((kernel_info.get('cmdline', ''), ''))
    testkernel.boot(args=cmdline)
def step_test(kernel_version):
    global kernel
    kernel = kernel_version  # Set the global in case anyone is using it.
    if len(kernel_list) > 1:
        # this is local to a machine, safe to assume there's only one host
        host, = job.hosts
        job.automatic_test_tag = host.get_kernel_ver()
    job.next_step('step0')
def step0():
    print "Hi"
    return locals()

# create a job to run on host1, host2, and any two machines in my_label
>>> rpc_interface.create_job(name='my_job',
...                          priority=10,
...                          control_file=cf_info['control_file'],
...                          control_type='Client',
...                          hosts=['host1', 'host2'],
...                          meta_hosts=['my_label', 'my_label'])
1

# get job info - this does not include status info for particular hosts
>>> data = rpc_interface.get_jobs()
>>> data = data[0]
>>> data['id'], data['owner'], data['name'], data['priority']
(1, u'debug_user', u'my_job', 10)
>>> data['control_file'] == cf_info['control_file']
True
>>> data['control_type']
'Client'

>>> today = datetime.date.today()
>>> data['created_on'].startswith(
...         '%d-%02d-%02d' % (today.year, today.month, today.day))
True

# get_num_jobs - useful when dealing with large numbers of jobs
>>> rpc_interface.get_num_jobs(name='my_job')
1

# check host queue entries for a job
>>> data = rpc_interface.get_host_queue_entries(job=1)
>>> len(data)
4

# get rid of created_on, it's nondeterministic
>>> data[0]['job']['created_on'] = data[2]['job']['created_on'] = None

# get_host_queue_entries returns full info about the job within each queue entry
>>> job = data[0]['job']
>>> job == {'control_file': cf_info['control_file'], # the control file we used
...         'control_type': 'Client',
...         'created_on': None,
...         'id': 1,
...         'name': 'my_job',
...         'owner': 'debug_user',
...         'priority': 10,
...         'synch_count': 0,
...         'timeout': 24,
...         'timeout_mins': 1440,
...         'max_runtime_mins': 1440,
...         'max_runtime_hrs' : 72,
...         'run_verify': False,
...         'run_reset': True,
...         'email_list': '',
...         'reboot_before': 'If dirty',
...         'reboot_after': 'Never',
...         'parse_failed_repair': True,
...         'drone_set': drone_set,
...         'parameterized_job': None,
...         'test_retry': 0,
...         'parent_job': None,
...         'shard': None,
...         'require_ssp': None}
True

# get_host_queue_entries returns a lot of data, so let's only check a couple
>>> data[0] == (
... {'active': 0,
...  'complete': 0,
...  'host': {'hostname': 'host1', # full host info here
...           'id': 2,
...           'invalid': 0,
...           'locked': 0,
...           'status': 'Ready',
...           'synch_id': None,
...           'protection': 'No protection',
...           'locked_by': None,
...           'lock_time': None,
...           'lock_reason': 'Locked for testing',
...           'dirty': True,
...           'leased': 1,
...           'shard': None},
...  'id': 1,
...  'job': job, # full job info here
...  'meta_host': None,
...  'status': 'Queued',
...  'deleted': 0,
...  'execution_subdir': '',
...  'atomic_group': None,
...  'aborted': False,
...  'started_on': None,
...  'finished_on': None,
...  'full_status': 'Queued'})
True
>>> data[2] == (
... {'active': 0,
...  'complete': 0,
...  'host': None,
...  'id': 3,
...  'job': job,
...  'meta_host': 'my_label',
...  'status': 'Queued',
...  'deleted': 0,
...  'execution_subdir': '',
...  'atomic_group': None,
...  'aborted': False,
...  'started_on': None,
...  'finished_on': None,
...  'full_status': 'Queued'})
True
>>> rpc_interface.get_num_host_queue_entries(job=1)
4
>>> rpc_interface.get_hqe_percentage_complete(job=1)
0.0

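# the raw queue entries can also be summarized client-side; a caller might,
# for instance, tally entry statuses itself (illustrative sketch only, not
# executed here):
#   from collections import defaultdict
#   counts = defaultdict(int)
#   for entry in rpc_interface.get_host_queue_entries(job=1):
#       counts[entry['status']] += 1
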
# get_jobs_summary adds status counts to the rest of the get_jobs info
>>> data = rpc_interface.get_jobs_summary()
>>> counts = data[0]['status_counts']
>>> counts
{u'Queued': 4}

# abort the job
>>> data = rpc_interface.abort_host_queue_entries(job__id=1)
>>> data = rpc_interface.get_jobs_summary(id=1)
>>> data[0]['status_counts']
{u'Aborted (Queued)': 4}

# Remove the two hosts in my_label
>>> rpc_interface.delete_host(id='my_label_host1')
>>> rpc_interface.delete_host(id='my_label_host2')


# extra querying parameters
# #########################

# get_* methods can take query_start and query_limit arguments to implement
# paging and a sort_by argument to specify the sort column
>>> data = rpc_interface.get_hosts(query_limit=1)
>>> [host['hostname'] for host in data]
[u'host1']
>>> data = rpc_interface.get_hosts(query_start=1, query_limit=1)
>>> [host['hostname'] for host in data]
[u'host2']

# sort_by = ['-hostname'] indicates sorting in descending order by hostname
>>> data = rpc_interface.get_hosts(sort_by=['-hostname'])
>>> [host['hostname'] for host in data]
[u'host2', u'host1']
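
# together, query_start/query_limit/sort_by support simple paging; a client
# might walk all hosts one page at a time like this (illustrative sketch only,
# not executed here):
#   page_size = 100
#   start = 0
#   while True:
#       page = rpc_interface.get_hosts(query_start=start,
#                                      query_limit=page_size,
#                                      sort_by=['hostname'])
#       if not page:
#           break
#       start += len(page)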


# cloning a job
# #############

>>> job_id = rpc_interface.create_job(name='my_job_to_clone',
...                                   priority=50,
...                                   control_file=cf_info['control_file'],
...                                   control_type='Client',
...                                   hosts=['host2'],
...                                   synch_count=1)
>>> info = rpc_interface.get_info_for_clone(job_id, False)
>>> info['atomic_group_name']
>>> info['meta_host_counts']
{}
>>> info['job']['dependencies']
[]
>>> info['job']['priority']
50

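# the returned info mirrors create_job()'s arguments, so a client can use it
# to resubmit a near-copy of the job; roughly (illustrative sketch with a
# made-up name suffix, not executed here):
#   rpc_interface.create_job(name=info['job']['name'] + '_clone',
#                            priority=info['job']['priority'],
#                            control_file=info['job']['control_file'],
#                            control_type=info['job']['control_type'],
#                            hosts=['host2'])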

# advanced usage
# ##############

# synch_count
>>> job_id = rpc_interface.create_job(name='my_job',
...                          priority=10,
...                          control_file=cf_info['control_file'],
...                          control_type='Server',
...                          synch_count=2,
...                          hosts=['host1', 'host2'])

>>> data = rpc_interface.get_jobs(id=job_id)
>>> data[0]['synch_count']
2

# get hosts ACL'd to a user
>>> hosts = rpc_interface.get_hosts(aclgroup__users__login='debug_user')
>>> sorted([host['hostname'] for host in hosts])
[u'host1', u'host2']

>>> rpc_interface.add_acl_group(name='mygroup')
3
>>> rpc_interface.acl_group_add_users('mygroup', ['debug_user'])
>>> rpc_interface.acl_group_add_hosts('mygroup', ['host1'])
>>> data = rpc_interface.get_acl_groups(name='Everyone')[0]
>>> data['users'], data['hosts']
([u'debug_user', u'showard'], [u'host2'])
>>> data = rpc_interface.get_acl_groups(name='mygroup')[0]
>>> data['users'], data['hosts']
([u'debug_user'], [u'host1'])

>>> hosts = rpc_interface.get_hosts(aclgroup__users__login='debug_user')
>>> sorted([host['hostname'] for host in hosts])
[u'host1', u'host2']
>>> hosts = rpc_interface.get_hosts(aclgroup__users__login='showard')
>>> [host['hostname'] for host in hosts]
[u'host2']

>>> rpc_interface.delete_acl_group('mygroup')
>>> data = rpc_interface.get_acl_groups(name='Everyone')[0]
>>> sorted(data['hosts'])
[u'host1', u'host2']

# atomic groups
# #############

# Add an atomic group and associate some labels and new hosts with it.
>>> mini_rack_group_id = rpc_interface.add_atomic_group(
...         name='mini rack',
...         max_number_of_machines=10,
...         description='a partial rack-o-machines')

>>> label_id = rpc_interface.add_label(name='one-label')
>>> rpc_interface.modify_label(label_id, atomic_group='mini rack')
>>> labels = rpc_interface.get_labels(id=label_id)
>>> assert labels[0]['atomic_group']['id'] == mini_rack_group_id, labels
>>> rpc_interface.modify_label(label_id, atomic_group=None)
>>> labels = rpc_interface.get_labels(id=label_id)
>>> assert not labels[0]['atomic_group'], labels
>>> rpc_interface.modify_label(label_id, atomic_group='mini rack')
>>> labels = rpc_interface.get_labels(id=label_id)
>>> assert labels[0]['atomic_group']['id'] == mini_rack_group_id, labels
>>> data = rpc_interface.get_labels(atomic_group__name='mini rack')
>>> assert len(data) == 1
>>> assert data[0]['name'] == 'one-label', data
>>> assert data[0]['atomic_group']['id'] == mini_rack_group_id, data

>>> data = rpc_interface.get_atomic_groups()
>>> assert len(data) == 1
>>> assert data[0]['id'] == mini_rack_group_id, data
>>> assert data[0]['max_number_of_machines'] == 10, data
>>> assert data[0]['description'] == 'a partial rack-o-machines', data

>>> rpc_interface.modify_atomic_group(1, max_number_of_machines=8)
>>> data = rpc_interface.get_atomic_groups()
>>> assert data[0]['max_number_of_machines'] == 8, data

>>> unused = rpc_interface.add_host(hostname='ahost1')
>>> unused = rpc_interface.add_host(hostname='ahost2')
>>> unused = rpc_interface.add_host(hostname='ah3-blue')
>>> unused = rpc_interface.add_host(hostname='ah4-blue')
>>> two_id = rpc_interface.add_label(name='two-label')
>>> rpc_interface.label_add_hosts(
...        id=two_id, hosts=['ahost1', 'ahost2', 'ah3-blue', 'ah4-blue'])
>>> unused = rpc_interface.add_label(name='red-label')
>>> blue_id = rpc_interface.add_label(name='blue-label')
>>> rpc_interface.label_add_hosts(id=blue_id, hosts=['ah3-blue', 'ah4-blue'])

>>> rpc_interface.atomic_group_add_labels(mini_rack_group_id,
...                                       ['one-label', 'two-label',
...                                        'red-label'])
>>> ag_labels = rpc_interface.get_labels(atomic_group__name='mini rack')
>>> len(ag_labels)
3
>>> hosts_in_two = rpc_interface.get_hosts(multiple_labels=['two-label'])
>>> list(sorted(h['hostname'] for h in hosts_in_two))
[u'ah3-blue', u'ah4-blue', u'ahost1', u'ahost2']
>>> rpc_interface.atomic_group_remove_labels(mini_rack_group_id, ['red-label'])
>>> ag_labels = rpc_interface.get_labels(atomic_group__name='mini rack')
>>> sorted(label['name'] for label in ag_labels)
[u'one-label', u'two-label']

>>> host_list = rpc_interface.get_hosts()
>>> hosts_by_name = {}
>>> for host in host_list:
...     hosts_by_name[host['hostname']] = host
...
>>> hosts_by_name['host1']['atomic_group']
>>> hosts_by_name['ahost1']['atomic_group']
u'mini rack'
>>> hosts_by_name['ah3-blue']['atomic_group']
u'mini rack'
>>> host_list = rpc_interface.get_hosts(labels__atomic_group__name='mini rack')
>>> list(sorted(h['hostname'] for h in host_list))
[u'ah3-blue', u'ah4-blue', u'ahost1', u'ahost2']



## Test creation of a job in an atomic group without specifying any
## hosts or meta_hosts.

>>> sleep_cf_info = rpc_interface.generate_control_file(
...         tests=['sleeptest'],  kernel=[{'version': '2.6.18'}],
...         label='two-label')
>>> job_id = rpc_interface.create_job(
...         name='atomic_sleeptest', priority=30,
...         control_file=sleep_cf_info['control_file'],
...         control_type='Server', synch_count=1,
...         atomic_group_name='mini rack')

## Test creation of a job in an atomic group by specifying the atomic group
## name as a meta_host rather than explicitly using the atomic_group_name
## parameter.

>>> job_id = rpc_interface.create_job(
...         name='atomic_sleeptest', priority=30,
...         control_file=sleep_cf_info['control_file'],
...         control_type='Server', synch_count=1,
...         meta_hosts=['mini rack'])
>>> job_id = rpc_interface.create_job(
...         name='atomic_sleeptest', priority=30,
...         control_file=sleep_cf_info['control_file'],
...         control_type='Server', synch_count=1,
...         meta_hosts=['mini rack'],
...         atomic_group_name='Different')
Traceback (most recent call last):
ValidationError: {'meta_hosts': 'Label "mini rack" not found.  If assumed to be an atomic group it would conflict with the supplied atomic group "Different".'}

## Test job creation with an atomic group.

# fail to create a job in an atomic group.  one_time_hosts not allowed.
>>> rpc_interface.create_job(name='my_atomic_job',
...                          priority=50,
...                          control_file=cf_info['control_file'],
...                          control_type='Server',
...                          one_time_hosts=['hostX', 'hostY'],
...                          synch_count=2,
...                          atomic_group_name='mini rack')
Traceback (most recent call last):
ValidationError: {'one_time_hosts': 'One time hosts cannot be used with an Atomic Group.'}

# fail to create a job in an atomic group.  Synch count larger than max
>>> rpc_interface.create_job(name='my_atomic_job',
...                          priority=50,
...                          control_file=cf_info['control_file'],
...                          control_type='Server',
...                          synch_count=25,
...                          atomic_group_name='mini rack')
Traceback (most recent call last):
ValidationError: {'atomic_group_name': 'You have requested a synch_count (25) greater than the maximum machines in the requested Atomic Group (8).'}

# fail to create a job in an atomic group.  not enough hosts due to host list.
>>> rpc_interface.create_job(name='my_atomic_job',
...                          priority=50,
...                          control_file=cf_info['control_file'],
...                          control_type='Server',
...                          hosts=['ahost1', 'ahost2'],
...                          synch_count=3,
...                          atomic_group_name='mini rack')
Traceback (most recent call last):
ValidationError: {'hosts': 'only 2 hosts provided for job with synch_count = 3'}

# fail to create a job in an atomic group.  hosts not in atomic group.
>>> rpc_interface.create_job(name='my_atomic_job',
...                          priority=50,
...                          control_file=cf_info['control_file'],
...                          control_type='Server',
...                          hosts=['host1', 'host2'],
...                          synch_count=2,
...                          atomic_group_name='mini rack')
Traceback (most recent call last):
ValidationError: {'hosts': u'Hosts "host1, host2" are not in Atomic Group "mini rack"'}

# fail to create a job in an atomic group.  not enough hosts due to meta_hosts.
>>> rpc_interface.create_job(name='my_atomic_job',
...                          priority=50,
...                          control_file=cf_info['control_file'],
...                          control_type='Server',
...                          meta_hosts=['blue-label'],
...                          synch_count=4,
...                          atomic_group_name='mini rack')
Traceback (most recent call last):
ValidationError: {'atomic_group_name': u'Insufficient hosts in Atomic Group "mini rack" with the supplied dependencies and meta_hosts.'}

# fail to create a job in an atomic group.  not enough hosts.
>>> rpc_interface.create_job(name='my_atomic_job',
...                          priority=50,
...                          control_file=cf_info['control_file'],
...                          control_type='Server',
...                          synch_count=5,
...                          atomic_group_name='mini rack')
Traceback (most recent call last):
ValidationError: {'atomic_group_name': u'Insufficient hosts in Atomic Group "mini rack" with the supplied dependencies and meta_hosts.'}

# create a job in an atomic group.
>>> job_id = rpc_interface.create_job(name='my_atomic_job',
...                                   priority=50,
...                                   control_file=cf_info['control_file'],
...                                   control_type='Server',
...                                   hosts=['ahost1', 'ahost2'],
...                                   meta_hosts=['blue-label'],
...                                   synch_count=4,
...                                   atomic_group_name='mini rack')

>>> data = rpc_interface.get_host_queue_entries(job__id=job_id)
>>> data[0]['atomic_group']['id']
1

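# a client can avoid the synch_count failures above by checking the group's
# capacity first; roughly (illustrative sketch only, not executed here;
# wanted_synch_count is a made-up variable):
#   group = rpc_interface.get_atomic_groups(name='mini rack')[0]
#   assert wanted_synch_count <= group['max_number_of_machines']
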
# create a job using hosts in an atomic group but forget to specify the group.
>>> rpc_interface.create_job(name='poke_foo',
...                          priority=10,
...                          control_file=cf_info['control_file'],
...                          control_type='Client',
...                          hosts=['ahost1', 'ahost2'])
Traceback (most recent call last):
ValidationError: {'hosts': u'Host(s) "ahost1, ahost2" are atomic group hosts but no atomic group was specified for this job.'}

# Create a job using a label in an atomic group as the meta-host but forget
# to specify the group.  The frontend should figure this out for us.
>>> job_id = rpc_interface.create_job(name='created_without_explicit_ag',
...                          priority=50,
...                          control_file=cf_info['control_file'],
...                          control_type='Client',
...                          meta_hosts=['two-label'])

>>> job_id = rpc_interface.create_job(
...         name='atomic_sleeptest', priority=30,
...         control_file=sleep_cf_info['control_file'],
...         control_type='Server', synch_count=1,
...         meta_hosts=['two-label'],
...         dependencies=['blue-label'])
>>> peon_user = models.User(login='peon_user')
>>> peon_user.access_level = 0
>>> from autotest_lib.client.common_lib.test_utils import mock
>>> god = mock.mock_god()
>>> god.stub_function(models.User, "current_user")
>>> models.User.current_user.expect_call().and_return(peon_user)
>>> rpc_interface.abort_host_queue_entries(job__id=job_id)
Traceback (most recent call last):
AclAccessViolation: You cannot abort the following job entries: 8-debug_user/two-label
>>> god.check_playback()
>>> god.unstub_all()

>>> rpc_interface.create_job(name='never_run2',
...                          priority=50,
...                          control_file=cf_info['control_file'],
...                          control_type='Client',
...                          meta_hosts=['blue-label'],
...                          dependencies=['two-label'])
Traceback (most recent call last):
ValidationError: {'atomic_group_name': "Dependency u'two-label' requires an atomic group but no atomic_group_name or meta_host in an atomic group was specified for this job."}

>>> invisible_group_id = rpc_interface.add_atomic_group(
...         name='invisible rack',
...         max_number_of_machines=3,
...         description='a hidden rack-o-machines')
>>> rpc_interface.atomic_group_add_labels(invisible_group_id,
...                                       ['blue-label'])
>>> rpc_interface.create_job(name='never_run3',
...                          priority=50,
...                          control_file=cf_info['control_file'],
...                          control_type='Client',
...                          meta_hosts=['two-label'],
...                          atomic_group_name='invisible rack')
Traceback (most recent call last):
ValidationError: {'atomic_group_name': "meta_hosts or dependency u'two-label' requires atomic group u'mini rack' instead of the supplied atomic_group_name=u'invisible rack'."}

# we're done testing atomic groups, clean up
>>> rpc_interface.delete_atomic_group(invisible_group_id)
>>> rpc_interface.delete_atomic_group(mini_rack_group_id)
>>> assert len(rpc_interface.get_atomic_groups()) == 0
    964