#!/usr/bin/python
#pylint: disable-msg=C0111

# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import unittest

import common
from autotest_lib.client.common_lib import host_queue_entry_states
from autotest_lib.frontend import setup_django_environment
from autotest_lib.frontend.afe import frontend_test_utils
from autotest_lib.frontend.afe import models
from autotest_lib.frontend.afe import rdb_model_extensions
from autotest_lib.scheduler import rdb
from autotest_lib.scheduler import rdb_hosts
from autotest_lib.scheduler import rdb_lib
from autotest_lib.scheduler import rdb_requests
from autotest_lib.scheduler import rdb_testing_utils
from autotest_lib.server.cros import provision


class AssignmentValidator(object):
    """Utility class to check that priority inversion doesn't happen."""
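    # Typical usage, sketched from the tests below: stub out the request
    # manager's response handler with priority_checking_response_handler so
    # every acquisition made through rdb_lib gets re-validated for priority
    # inversion, e.g.:
    #   self.god.stub_with(rdb_requests.BaseHostRequestManager, 'response',
    #           AssignmentValidator.priority_checking_response_handler)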


    @staticmethod
    def check_acls_deps(host, request):
        """Check if a host and request match by comparing acls and deps.

        @param host: A dictionary representing attributes of the host.
        @param request: A request, as defined in rdb_requests.

        @return True if the deps/acls of the request match the host.
        """
        # Unfortunately the host's labels are label names, not ids.
        request_deps = set([l.name for l in
                models.Label.objects.filter(id__in=request.deps)])
        return (set(host['labels']).intersection(request_deps) == request_deps
                and set(host['acls']).intersection(request.acls))


    @staticmethod
    def find_matching_host_for_request(hosts, request):
        """Find a host from the given list of hosts, matching the request.

        @param hosts: A list of dictionaries representing host attributes.
        @param request: The unsatisfied request.

        @return: A host, if a matching host is found from the input list;
            None otherwise.
        """
        if not hosts or not request:
            return None
        for host in hosts:
            if AssignmentValidator.check_acls_deps(host, request):
                return host


    @staticmethod
    def sort_requests(requests):
        """Sort the requests by priority.

        @param requests: Unordered requests.

        @return: A list of (request, count) tuples ordered by descending
            priority.
        """
        return sorted(collections.Counter(requests).items(),
                key=lambda request: request[0].priority, reverse=True)


    @staticmethod
    def verify_priority(request_queue, result):
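        """Check that no priority inversion occurred in an acquisition.

        @param request_queue: The original, unordered list of requests.
        @param result: A map of request -> list of hosts assigned to it.

        @raises ValueError: If a host handed to a lower priority request
            could have satisfied an unsatisfied higher priority request.
        """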
        requests = AssignmentValidator.sort_requests(request_queue)
        for request, count in requests:
            hosts = result.get(request)
            # The request was completely satisfied.
            if hosts and len(hosts) == count:
                continue
            # Go through all hosts given to lower priority requests and
            # make sure we couldn't have allocated one of them for this
            # unsatisfied higher priority request.
            lower_requests = requests[requests.index((request, count))+1:]
            for lower_request, _ in lower_requests:
                if (lower_request.priority < request.priority and
                    AssignmentValidator.find_matching_host_for_request(
                            result.get(lower_request), request)):
                    raise ValueError('Priority inversion occurred between '
                            'priorities %s and %s' %
                            (request.priority, lower_request.priority))


    @staticmethod
    def priority_checking_response_handler(request_manager):
        """Fake response handler wrapper for any request_manager.

        Check that higher priority requests get a response over lower priority
        requests, by re-validating all the hosts assigned to a lower priority
        request against the unsatisfied higher priority ones.

        @param request_manager: A request_manager as defined in rdb_lib.

        @raises ValueError: If priority inversion is detected.
        """
        # First call the rdb to make its decisions, then sort the requests
        # by priority and make sure unsatisfied requests higher up in the list
        # could not have been satisfied by hosts assigned to requests lower
        # down in the list.
        result = request_manager.api_call(request_manager.request_queue)
        if not result:
            raise ValueError('Expected results but got none.')
        AssignmentValidator.verify_priority(
                request_manager.request_queue, result)
        for hosts in result.values():
            for host in hosts:
                yield host


class BaseRDBTest(rdb_testing_utils.AbstractBaseRDBTester, unittest.TestCase):
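    """Tests for host acquisition and release through the rdb."""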
    _config_section = 'AUTOTEST_WEB'


    def testAcquireLeasedHostBasic(self):
        """Test that acquisition of a leased host doesn't happen.

        @raises AssertionError: If the one host that satisfies the request
            is acquired.
        """
        job = self.create_job(deps=set(['a']))
        host = self.db_helper.create_host('h1', deps=set(['a']))
        host.leased = 1
        host.save()
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        hosts = list(rdb_lib.acquire_hosts(queue_entries))
        self.assertTrue(len(hosts) == 1 and hosts[0] is None)


    def testAcquireLeasedHostRace(self):
        """Test behaviour when hosts are leased just before acquisition.

        If a fraction of the hosts somehow get leased between finding and
        acquisition, the rdb should just return the remaining hosts for the
        request to use.

        @raises AssertionError: If both the requests get a host successfully,
            since one host gets leased before the final attempt to lease both.
        """
        j1 = self.create_job(deps=set(['a']))
        j2 = self.create_job(deps=set(['a']))
        hosts = [self.db_helper.create_host('h1', deps=set(['a'])),
                 self.db_helper.create_host('h2', deps=set(['a']))]

        @rdb_hosts.return_rdb_host
        def local_find_hosts(host_query_manager, deps, acls):
            """Return a predetermined list of hosts, one of which is leased."""
            h1 = models.Host.objects.get(hostname='h1')
            h1.leased = 1
            h1.save()
            h2 = models.Host.objects.get(hostname='h2')
            return [h1, h2]

        self.god.stub_with(rdb.AvailableHostQueryManager, 'find_hosts',
                           local_find_hosts)
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        hosts = list(rdb_lib.acquire_hosts(queue_entries))
        self.assertTrue(len(hosts) == 2 and None in hosts)
        self.check_hosts(iter(hosts))


    def testHostReleaseStates(self):
        """Test that we will only release an unused host if it is in Ready.

        @raises AssertionError: If the host gets released in any other state.
        """
        host = self.db_helper.create_host('h1', deps=set(['x']))
        for state in rdb_model_extensions.AbstractHostModel.Status.names:
            host.status = state
            host.leased = 1
            host.save()
            self._release_unused_hosts()
            host = models.Host.objects.get(hostname='h1')
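            # Only a Ready host should have been released (leased reset to 0);
            # in every other state the host must remain leased.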
            self.assertTrue(host.leased == (state != 'Ready'))


    def testHostReleaseHQE(self):
        """Test that we will not release a ready host if it's being used.

        @raises AssertionError: If the host is released even though it has
            been assigned to an active hqe.
        """
        # Create a host and lease it out in Ready.
        host = self.db_helper.create_host('h1', deps=set(['x']))
        host.status = 'Ready'
        host.leased = 1
        host.save()

        # Create a job and give its hqe the leased host.
        job = self.create_job(deps=set(['x']))
        self.db_helper.add_host_to_job(host, job.id)
        hqe = models.HostQueueEntry.objects.get(job_id=job.id)

        # Activate the hqe by setting its state.
        hqe.status = host_queue_entry_states.ACTIVE_STATUSES[0]
        hqe.save()

        # Make sure the hqe's host isn't released, even though it's in Ready.
        self._release_unused_hosts()
        host = models.Host.objects.get(hostname='h1')
        self.assertTrue(host.leased == 1)


    def testBasicDepsAcls(self):
        """Test a basic deps/acls request.

        Make sure that a basic request with deps and acls finds a host from
        the ready pool that has matching labels and is in a matching aclgroup.

        @raises AssertionError: If the request doesn't find a host, since
            we insert a matching host in the ready pool.
        """
        deps = set(['a', 'b'])
        acls = set(['a', 'b'])
        self.db_helper.create_host('h1', deps=deps, acls=acls)
        job = self.create_job(user='autotest_system', deps=deps, acls=acls)
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        matching_host = rdb_lib.acquire_hosts(queue_entries).next()
        self.check_host_assignment(job.id, matching_host.id)
        self.assertTrue(matching_host.leased == 1)


    def testPreferredDeps(self):
        """Test that preferred deps are respected.

        If multiple hosts satisfy a job's deps, the one with the preferred
        label will be assigned to the job.

        @raises AssertionError: If a host without a preferred label is
                                assigned to the job instead of one with
                                a preferred label.
        """
        lumpy_deps = set(['board:lumpy'])
        stumpy_deps = set(['board:stumpy'])
        stumpy_deps_with_crosversion = set(
                ['board:stumpy', 'cros-version:lumpy-release/R41-6323.0.0'])

        acls = set(['a', 'b'])
        # Hosts lumpy1 and lumpy2 are created as a control group,
        # which ensures that if no preferred label is used, the host
        # with a smaller id will be chosen first. We need to make sure
        # stumpy2 was chosen because it has a cros-version label, not
        # because of other randomness.
        self.db_helper.create_host('lumpy1', deps=lumpy_deps, acls=acls)
        self.db_helper.create_host('lumpy2', deps=lumpy_deps, acls=acls)
        self.db_helper.create_host('stumpy1', deps=stumpy_deps, acls=acls)
        self.db_helper.create_host(
                    'stumpy2', deps=stumpy_deps_with_crosversion, acls=acls)
        job_1 = self.create_job(user='autotest_system',
                              deps=lumpy_deps, acls=acls)
        job_2 = self.create_job(user='autotest_system',
                              deps=stumpy_deps_with_crosversion, acls=acls)
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        matching_hosts = list(rdb_lib.acquire_hosts(queue_entries))
        assignment = {}
        for job, host in zip(queue_entries, matching_hosts):
            self.check_host_assignment(job.id, host.id)
            assignment[job.id] = host.hostname
        self.assertEqual(assignment[job_1.id], 'lumpy1')
        self.assertEqual(assignment[job_2.id], 'stumpy2')


    def testBadDeps(self):
        """Test that we find no hosts when only acls match.

        @raises AssertionError: If the request finds a host, since the only
            host in the ready pool will not have matching deps.
        """
        host_labels = set(['a'])
        job_deps = set(['b'])
        acls = set(['a', 'b'])
        self.db_helper.create_host('h1', deps=host_labels, acls=acls)
        job = self.create_job(user='autotest_system', deps=job_deps, acls=acls)
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        matching_host = rdb_lib.acquire_hosts(queue_entries).next()
        self.assert_(not matching_host)


    def testBadAcls(self):
        """Test that we find no hosts when only deps match.

        @raises AssertionError: If the request finds a host, since the only
            host in the ready pool will not have matching acls.
        """
        deps = set(['a'])
        host_acls = set(['a'])
        job_acls = set(['b'])
        self.db_helper.create_host('h1', deps=deps, acls=host_acls)

        # Create the job as a new user who is only in the 'b' and 'Everyone'
        # aclgroups. Though there are several hosts in the Everyone group, the
        # one host that has the 'a' dep isn't in 'b'.
        job = self.create_job(user='new_user', deps=deps, acls=job_acls)
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        matching_host = rdb_lib.acquire_hosts(queue_entries).next()
        self.assert_(not matching_host)


    def testBasicPriority(self):
        """Test that priority inversion doesn't happen.

        Schedule 2 jobs with the same deps, acls and user, but different
        priorities, and confirm that the higher priority request gets the host.
        This confirmation happens through the AssignmentValidator.

        @raises AssertionError: If the unimportant request gets host h1 instead
            of the important request.
        """
        deps = set(['a', 'b'])
        acls = set(['a', 'b'])
        self.db_helper.create_host('h1', deps=deps, acls=acls)
        important_job = self.create_job(user='autotest_system',
                deps=deps, acls=acls, priority=2)
        un_important_job = self.create_job(user='autotest_system',
                deps=deps, acls=acls, priority=0)
        queue_entries = self._dispatcher._refresh_pending_queue_entries()

        self.god.stub_with(rdb_requests.BaseHostRequestManager, 'response',
                AssignmentValidator.priority_checking_response_handler)
        self.check_hosts(rdb_lib.acquire_hosts(queue_entries))


    def testPriorityLevels(self):
        """Test that priority inversion doesn't happen.

        Increases a job's priority and makes several requests for hosts,
        checking that priority inversion doesn't happen.

        @raises AssertionError: If the unimportant job gets h1 while it is
            still unimportant, or doesn't get h1 after it becomes the
            most important job.
        """
        deps = set(['a', 'b'])
        acls = set(['a', 'b'])
        self.db_helper.create_host('h1', deps=deps, acls=acls)

        # Create jobs that will bucket differently and confirm that jobs in an
        # earlier bucket get a host.
        first_job = self.create_job(
                user='autotest_system', deps=deps, acls=acls)
        important_job = self.create_job(user='autotest_system', deps=deps,
                acls=acls, priority=2)
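        # Drop one of the two deps so the next job's request differs from the
        # earlier ones and buckets separately (see the comment above).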
        deps.pop()
        unimportant_job = self.create_job(user='someother_system', deps=deps,
                acls=acls, priority=1)
        queue_entries = self._dispatcher._refresh_pending_queue_entries()

        self.god.stub_with(rdb_requests.BaseHostRequestManager, 'response',
                AssignmentValidator.priority_checking_response_handler)
        self.check_hosts(rdb_lib.acquire_hosts(queue_entries))

        # Elevate the priority of the unimportant job, so we now have
        # 2 jobs at the same priority.
        self.db_helper.increment_priority(job_id=unimportant_job.id)
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        self._release_unused_hosts()
        self.check_hosts(rdb_lib.acquire_hosts(queue_entries))

        # Elevate the unimportant job's priority once more, so it is now the
        # most important job, and confirm that it gets the host over the jobs
        # that got it the last time.
        self.db_helper.increment_priority(job_id=unimportant_job.id)
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        self._release_unused_hosts()
        self.check_hosts(rdb_lib.acquire_hosts(queue_entries))


    def testFrontendJobScheduling(self):
        """Test basic frontend job scheduling.

        @raises AssertionError: If the received and requested host don't match,
            or the mis-matching host is returned instead.
        """
        deps = set(['x', 'y'])
        acls = set(['a', 'b'])

        # Create 2 frontend jobs and only one matching host.
        matching_job = self.create_job(acls=acls, deps=deps)
        matching_host = self.db_helper.create_host('h1', acls=acls, deps=deps)
        mis_matching_job = self.create_job(acls=acls, deps=deps)
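        # deps.pop() below returns a single label, so the mis-matching host
        # carries only one of the two deps and can't satisfy its job's request.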
        mis_matching_host = self.db_helper.create_host(
                'h2', acls=acls, deps=deps.pop())
        self.db_helper.add_host_to_job(matching_host, matching_job.id)
        self.db_helper.add_host_to_job(mis_matching_host, mis_matching_job.id)

        # Check that only the matching host is returned, and that we get 'None'
        # for the second request.
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        hosts = list(rdb_lib.acquire_hosts(queue_entries))
        self.assertTrue(len(hosts) == 2 and None in hosts)
        returned_host = [host for host in hosts if host].pop()
        self.assertTrue(matching_host.id == returned_host.id)


    def testFrontendJobPriority(self):
        """Test that frontend job scheduling doesn't ignore priorities.

        @raises ValueError: If the priorities of frontend jobs are ignored.
        """
        board = 'x'
        high_priority = self.create_job(priority=2, deps=set([board]))
        low_priority = self.create_job(priority=1, deps=set([board]))
        host = self.db_helper.create_host('h1', deps=set([board]))
        self.db_helper.add_host_to_job(host, low_priority.id)
        self.db_helper.add_host_to_job(host, high_priority.id)

        queue_entries = self._dispatcher._refresh_pending_queue_entries()

        def local_response_handler(request_manager):
            """Confirms that a higher priority frontend job gets a host.

            @raises ValueError: If priority inversion happens and the job
                with priority 1 gets the host instead.
            """
            result = request_manager.api_call(request_manager.request_queue)
            if not result:
                raise ValueError('Expected the high priority request to '
                                 'get a host, but the result is empty.')
            for request, hosts in result.iteritems():
                if request.priority == 1:
                    raise ValueError('Priority of frontend job ignored.')
                if len(hosts) > 1:
                    raise ValueError('Multiple hosts returned against one '
                                     'frontend job scheduling request.')
                yield hosts[0]

        self.god.stub_with(rdb_requests.BaseHostRequestManager, 'response',
                           local_response_handler)
        self.check_hosts(rdb_lib.acquire_hosts(queue_entries))


    def testSuiteOrderedHostAcquisition(self):
        """Test that older suite jobs acquire hosts first.

        Make sure older suite jobs get hosts first, but not at the expense of
        higher priority jobs.

        @raises ValueError: If unexpected acquisitions occur, e.g.:
            suite_job_2 acquires the last 2 hosts instead of suite_job_1.
            isolated_important_job doesn't get any hosts.
            Any job acquires more hosts than necessary.
        """
        board = 'x'

        # Create 2 suites such that the later suite has an ordering of deps
        # that places it ahead of the earlier suite, if parent_job_id is
        # ignored.
        suite_without_dep = self.create_suite(num=2, priority=0, board=board)

        suite_with_dep = self.create_suite(num=1, priority=0, board=board)
        self.db_helper.add_deps_to_job(suite_with_dep[0], dep_names=list('y'))

        # Create an important job that should be ahead of the first suite,
        # because priority trumps parent_job_id and time of creation.
        isolated_important_job = self.create_job(priority=3, deps=set([board]))

        # Create 3 hosts, all with the deps to satisfy the last suite.
        for i in range(0, 3):
            self.db_helper.create_host('h%s' % i, deps=set([board, 'y']))

        queue_entries = self._dispatcher._refresh_pending_queue_entries()

        def local_response_handler(request_manager):
            """Reorder requests and check host acquisition.

            @raises ValueError: If unexpected/no acquisitions occur.
            """
            if any([request for request in request_manager.request_queue
                    if request.parent_job_id is None]):
                raise ValueError('Parent_job_id can never be None.')

            # This will result in the ordering:
            # [suite_2_1, suite_1_*, suite_1_*, isolated_important_job]
            # The priority scheduling order should be:
            # [isolated_important_job, suite_1_*, suite_1_*, suite_2_1]
            # Since:
            #   a. the isolated_important_job is the most important.
            #   b. suite_1 was created before suite_2, regardless of deps.
            disorderly_queue = sorted(request_manager.request_queue,
                    key=lambda r: -r.parent_job_id)
            request_manager.request_queue = disorderly_queue
            result = request_manager.api_call(request_manager.request_queue)
            if not result:
                raise ValueError('Expected results but got none.')

            # Verify that the isolated_important_job got a host, and that the
            # first suite got both remaining free hosts.
            for request, hosts in result.iteritems():
                if request.parent_job_id == 0:
                    if len(hosts) > 1:
                        raise ValueError('First job acquired more hosts than '
                                'necessary. Response map: %s' % result)
                    continue
                if request.parent_job_id == 1:
                    if len(hosts) < 2:
                        raise ValueError('First suite job requests were not '
                                'satisfied. Response_map: %s' % result)
                    continue
                # The second suite job got hosts instead of one of
                # the others. Either way this is a failure.
                raise ValueError('Unexpected host acquisition. '
                        'Response map: %s' % result)
            yield None

        self.god.stub_with(rdb_requests.BaseHostRequestManager, 'response',
                           local_response_handler)
        list(rdb_lib.acquire_hosts(queue_entries))


    def testConfigurations(self):
        """Test that configurations don't matter.

        @raises AssertionError: If the request doesn't find a host, which
            will happen if configurations are not stripped out.
        """
        self.god.stub_with(provision.Cleanup,
                           '_actions',
                           {'action': 'fakeTest'})
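        # With the stub above, 'action' is treated as a provision/configuration
        # label, so it should be stripped from the job's deps before the rdb
        # matches hosts.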
        job_labels = set(['action', 'a'])
        host_deps = set(['a'])
        db_host = self.db_helper.create_host('h1', deps=host_deps)
        self.create_job(user='autotest_system', deps=job_labels)
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        matching_host = rdb_lib.acquire_hosts(queue_entries).next()
        self.assert_(matching_host.id == db_host.id)


class RDBMinDutTest(
        rdb_testing_utils.AbstractBaseRDBTester, unittest.TestCase):
    """Test AvailableHostRequestHandler."""

    _config_section = 'AUTOTEST_WEB'


    def min_dut_test_helper(self, num_hosts, suite_settings):
        """A helper function to test min_dut logic.

        @param num_hosts: Total number of hosts to create.
        @param suite_settings: A list of dictionaries specifying how suites
                               should be created and verified, e.g.
                {'priority': 10, 'num_jobs': 3,
                 'min_duts': 2, 'expected_acquired': 1}
                will create a suite that has 3 child jobs, with priority 10
                and min_duts 2, and assert that the suite acquired 1 dut.
        """
        acls = set(['fake_acl'])
        hosts = []
        for i in range(0, num_hosts):
            hosts.append(self.db_helper.create_host(
                'h%d' % i, deps=set(['board:lumpy']), acls=acls))
        suites = {}
        suite_min_duts = {}
        for setting in suite_settings:
            s = self.create_suite(num=setting['num_jobs'],
                                  priority=setting['priority'],
                                  board='board:lumpy', acls=acls)
            # Empty list will be used to store acquired hosts.
            suites[s['parent_job'].id] = (setting, [])
            suite_min_duts[s['parent_job'].id] = setting['min_duts']
        queue_entries = self._dispatcher._refresh_pending_queue_entries()
        matching_hosts = rdb_lib.acquire_hosts(queue_entries, suite_min_duts)
        for host, queue_entry in zip(matching_hosts, queue_entries):
            if host:
                suites[queue_entry.job.parent_job_id][1].append(host)

        for setting, hosts in suites.itervalues():
            self.assertEqual(len(hosts), setting['expected_acquired'])


    def testHighPriorityTakeAll(self):
        """Min duts not satisfied."""
        num_hosts = 1
        suite1 = {'priority': 20, 'num_jobs': 3, 'min_duts': 2,
                  'expected_acquired': 1}
        suite2 = {'priority': 10, 'num_jobs': 7, 'min_duts': 5,
                  'expected_acquired': 0}
        self.min_dut_test_helper(num_hosts, [suite1, suite2])


    def testHighPriorityMinSatisfied(self):
        """High priority min duts satisfied."""
        num_hosts = 4
        suite1 = {'priority': 20, 'num_jobs': 4, 'min_duts': 2,
                  'expected_acquired': 2}
        suite2 = {'priority': 10, 'num_jobs': 7, 'min_duts': 5,
                  'expected_acquired': 2}
        self.min_dut_test_helper(num_hosts, [suite1, suite2])


    def testAllPrioritiesMinSatisfied(self):
        """Min duts satisfied."""
        num_hosts = 7
        suite1 = {'priority': 20, 'num_jobs': 4, 'min_duts': 2,
                  'expected_acquired': 2}
        suite2 = {'priority': 10, 'num_jobs': 7, 'min_duts': 5,
                  'expected_acquired': 5}
        self.min_dut_test_helper(num_hosts, [suite1, suite2])


    def testHighPrioritySatisfied(self):
        """Min duts satisfied, high priority suite satisfied."""
        num_hosts = 10
        suite1 = {'priority': 20, 'num_jobs': 4, 'min_duts': 2,
                  'expected_acquired': 4}
        suite2 = {'priority': 10, 'num_jobs': 7, 'min_duts': 5,
                  'expected_acquired': 6}
        self.min_dut_test_helper(num_hosts, [suite1, suite2])


    def testEqualPriorityFirstSuiteMinSatisfied(self):
        """Equal priority, earlier suite got min duts."""
        num_hosts = 4
        suite1 = {'priority': 20, 'num_jobs': 4, 'min_duts': 2,
                  'expected_acquired': 2}
        suite2 = {'priority': 20, 'num_jobs': 7, 'min_duts': 5,
                  'expected_acquired': 2}
        self.min_dut_test_helper(num_hosts, [suite1, suite2])


    def testEqualPriorityAllSuitesMinSatisfied(self):
        """Equal priority, all suites got min duts."""
        num_hosts = 7
        suite1 = {'priority': 20, 'num_jobs': 4, 'min_duts': 2,
                  'expected_acquired': 2}
        suite2 = {'priority': 20, 'num_jobs': 7, 'min_duts': 5,
                  'expected_acquired': 5}
        self.min_dut_test_helper(num_hosts, [suite1, suite2])


if __name__ == '__main__':
    unittest.main()