import logging, time, random, math, os
from autotest_lib.client.common_lib import error
from autotest_lib.client.bin import utils
from autotest_lib.client.virt import virt_utils, virt_test_utils, aexpect
from autotest_lib.client.virt import virt_env_process


def run_ksm_overcommit(test, params, env):
    """
    Test how KSM (Kernel Shared Memory) acts when more than the physical
    memory is used. In the second part we also test how KVM handles a
    situation when the host runs out of memory (it is expected to pause the
    guest system, wait until some process returns memory and bring the guest
    back to life).

    @param test: kvm test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """

    def _start_allocator(vm, session, timeout):
        """
        Execute ksm_overcommit_guest.py on a guest, wait until it is
        initialized.

        @param vm: VM object.
        @param session: Remote session to a VM object.
        @param timeout: Timeout that will be used to verify if
                ksm_overcommit_guest.py started properly.
        """
        logging.debug("Starting ksm_overcommit_guest.py on guest %s", vm.name)
        session.sendline("python /tmp/ksm_overcommit_guest.py")
        try:
            session.read_until_last_line_matches(["PASS:", "FAIL:"], timeout)
        except aexpect.ExpectProcessTerminatedError, e:
            e_msg = ("Command ksm_overcommit_guest.py on vm '%s' failed: %s" %
                     (vm.name, str(e)))
            raise error.TestFail(e_msg)


    def _execute_allocator(command, vm, session, timeout):
        """
        Execute a given command on the ksm_overcommit_guest.py main loop,
        indicating the vm the command was executed on.

        @param command: Command that will be executed.
        @param vm: VM object.
        @param session: Remote session to VM object.
        @param timeout: Timeout used to verify expected output.

        @return: Tuple (match index, data)
        """
        logging.debug("Executing '%s' on ksm_overcommit_guest.py loop, "
                      "vm: %s, timeout: %s", command, vm.name, timeout)
        session.sendline(command)
        try:
            (match, data) = session.read_until_last_line_matches(
                ["PASS:", "FAIL:"], timeout)
        except aexpect.ExpectProcessTerminatedError, e:
            e_msg = ("Failed to execute command '%s' on "
                     "ksm_overcommit_guest.py, vm '%s': %s" %
                     (command, vm.name, str(e)))
            raise error.TestFail(e_msg)
        return (match, data)


    def get_ksmstat():
        """
        Return the amount of memory currently shared by KSM, in MB.

        @return: Memory shared by KSM, in MB.
        """
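        # Note on the conversion below: pages_sharing counts shared pages,
        # which are 4096 bytes each on x86 hosts (the architecture this test
        # assumes); dividing by 1e6 yields decimal megabytes, close enough
        # for the thresholds used here. For example (hypothetical value),
        # pages_sharing = 256000 gives 256000 * 4096 / 1e6 ~= 1048 MB.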
        f = open('/sys/kernel/mm/ksm/pages_sharing')
        ksm_pages = int(f.read())
        f.close()
        return (ksm_pages * 4096) / 1e6


    def initialize_guests():
        """
        Initialize guests (fill their memories with specified patterns).
        """
        logging.info("Phase 1: filling guest memory pages")
        for session in lsessions:
            vm = lvms[lsessions.index(session)]

            logging.debug("Turning off swap on vm %s", vm.name)
            session.cmd("swapoff -a", timeout=300)

            # Start the allocator
            _start_allocator(vm, session, 60 * perf_ratio)

        # Execute allocator on guests
        for i in range(0, vmsc):
            vm = lvms[i]

            a_cmd = "mem = MemFill(%d, %s, %s)" % (ksm_size, skeys[i], dkeys[i])
            _execute_allocator(a_cmd, vm, lsessions[i], 60 * perf_ratio)

            a_cmd = "mem.value_fill(%d)" % skeys[0]
            _execute_allocator(a_cmd, vm, lsessions[i], 120 * perf_ratio)

            # Let ksm_overcommit_guest.py do its job
            # (until shared mem reaches expected value)
            shm = 0
            j = 0
            logging.debug("Target shared meminfo for guest %s: %s", vm.name,
                          ksm_size)
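            # On new_ksm hosts the sysfs pages_sharing counter is global, so
            # after guest i merges we expect roughly ksm_size * (i + 1) MB
            # shared in total; with the old ksmctl interface only the per-VM
            # shared meminfo is available, so each guest is checked against
            # ksm_size alone.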
            while ((new_ksm and shm < ksm_size * (i + 1)) or
                   (not new_ksm and shm < ksm_size)):
                if j > 64:
                    logging.debug(virt_test_utils.get_memory_info(lvms))
                    raise error.TestError("SHM didn't merge the memory until "
                                          "the deadline on guest: %s" % vm.name)
                st = ksm_size / 200 * perf_ratio
                logging.debug("Waiting %ds before proceeding...", st)
                time.sleep(st)
                if new_ksm:
                    shm = get_ksmstat()
                else:
                    shm = vm.get_shared_meminfo()
                logging.debug("Shared meminfo for guest %s after "
                              "iteration %s: %s", vm.name, j, shm)
                j += 1

        # Keep some reserve
        rt = ksm_size / 200 * perf_ratio
        logging.debug("Waiting %ds before proceeding...", rt)
        time.sleep(rt)

        logging.debug(virt_test_utils.get_memory_info(lvms))
        logging.info("Phase 1: PASS")


    def separate_first_guest():
        """
        Separate memory of the first guest by generating a special random
        series.
        """
        logging.info("Phase 2: Split the pages on the first guest")

        a_cmd = "mem.static_random_fill()"
        data = _execute_allocator(a_cmd, lvms[0], lsessions[0],
                                  120 * perf_ratio)[1]

        r_msg = data.splitlines()[-1]
        logging.debug("Return message of static_random_fill: %s", r_msg)
        out = int(r_msg.split()[4])
        logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s", ksm_size, out,
                      (ksm_size * 1000 / out))
        logging.debug(virt_test_utils.get_memory_info(lvms))
        logging.info("Phase 2: PASS")


    def split_guest():
        """
        Sequential split of pages on guests up to the memory limit.
        """
        logging.info("Phase 3a: Sequential split of pages on guests up to "
                     "memory limit")
        last_vm = 0
        session = None
        vm = None
        for i in range(1, vmsc):
            # Check VMs
            for j in range(0, vmsc):
                if not lvms[j].is_alive():
                    e_msg = ("VM %d died while executing static_random_fill in "
                             "VM %d on allocator loop" % (j, i))
                    raise error.TestFail(e_msg)
            vm = lvms[i]
            session = lsessions[i]
            a_cmd = "mem.static_random_fill()"
            logging.debug("Executing %s on ksm_overcommit_guest.py loop, "
                          "vm: %s", a_cmd, vm.name)
            session.sendline(a_cmd)

            out = ""
            try:
                logging.debug("Watching host memory while filling vm %s memory",
                              vm.name)
                while not out.startswith("PASS") and not out.startswith("FAIL"):
                    if not vm.is_alive():
                        e_msg = ("VM %d died while executing static_random_fill"
                                 " on allocator loop" % i)
                        raise error.TestFail(e_msg)
                    free_mem = int(utils.read_from_meminfo("MemFree"))
                    if ksm_swap:
                        free_mem = (free_mem +
                                    int(utils.read_from_meminfo("SwapFree")))
                    logging.debug("Free memory on host: %d", free_mem)

                    # We need to keep some memory for python to run.
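                    # /proc/meminfo reports kB, so the guards below keep
                    # roughly 64 MB free (or about 440 MB * perf_ratio when
                    # swap is in play, since swapping slows the fill and the
                    # host needs more headroom). These thresholds are
                    # empirical values from the original test, not derived
                    # limits.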
                    if (free_mem < 64000) or (ksm_swap and
                                              free_mem < (450000 * perf_ratio)):
                        vm.monitor.cmd("stop")
                        for j in range(0, i):
                            lvms[j].destroy(gracefully=False)
                        time.sleep(20)
                        vm.monitor.cmd("c")
                        logging.debug("Only %s free memory, killing %d guests",
                                      free_mem, i)
                        last_vm = i
                        break
                    out = session.read_nonblocking(0.1)
                    time.sleep(2)
            except OSError:
                logging.debug("Only %s host free memory, killing %d guests",
                              free_mem, i)
                logging.debug("Stopping %s", vm.name)
                vm.monitor.cmd("stop")
                for j in range(0, i):
                    logging.debug("Destroying %s", lvms[j].name)
                    lvms[j].destroy(gracefully=False)
                time.sleep(20)
                vm.monitor.cmd("c")
                last_vm = i

            if last_vm != 0:
                break
            logging.debug("Memory filled for guest %s", vm.name)

        logging.info("Phase 3a: PASS")

        logging.info("Phase 3b: Check if memory in max loading guest is right")
        for i in range(last_vm + 1, vmsc):
            lsessions[i].close()
            if i == (vmsc - 1):
                logging.debug(virt_test_utils.get_memory_info([lvms[i]]))
            logging.debug("Destroying guest %s", lvms[i].name)
            lvms[i].destroy(gracefully=False)

        # Verify last machine with randomly generated memory
        a_cmd = "mem.static_random_verify()"
        _execute_allocator(a_cmd, lvms[last_vm], lsessions[last_vm],
                           (mem / 200 * 50 * perf_ratio))
        logging.debug(virt_test_utils.get_memory_info([lvms[last_vm]]))

        lsessions[last_vm].cmd_output("die()", 20)
        lvms[last_vm].destroy(gracefully=False)
        logging.info("Phase 3b: PASS")


    def split_parallel():
        """
        Parallel page splitting.
        """
        logging.info("Phase 1: parallel page splitting")
        # We have to wait until the allocator is finished (it waits 5 seconds
        # to clean the socket)

        session = lsessions[0]
        vm = lvms[0]
        for i in range(1, max_alloc):
            lsessions.append(vm.wait_for_login(timeout=360))

        session.cmd("swapoff -a", timeout=300)

        for i in range(0, max_alloc):
            # Start the allocator
            _start_allocator(vm, lsessions[i], 60 * perf_ratio)

        logging.info("Phase 1: PASS")

        logging.info("Phase 2a: Simultaneous merging")
        logging.debug("Memory used by allocator on guests = %dMB",
                      (ksm_size / max_alloc))
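        # All max_alloc allocators run inside the same guest, each filling
        # ksm_size / max_alloc MB with the same static key (skeys[0]), so
        # their pages are identical across processes and KSM can merge them
        # against each other.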

        for i in range(0, max_alloc):
            a_cmd = "mem = MemFill(%d, %s, %s)" % ((ksm_size / max_alloc),
                                                   skeys[i], dkeys[i])
            _execute_allocator(a_cmd, vm, lsessions[i], 60 * perf_ratio)

            a_cmd = "mem.value_fill(%d)" % (skeys[0])
            _execute_allocator(a_cmd, vm, lsessions[i], 90 * perf_ratio)

        # Wait until ksm_overcommit_guest.py merges the pages
        # (until shared mem reaches ksm_size)
        shm = 0
        i = 0
        logging.debug("Target shared memory size: %s", ksm_size)
        while shm < ksm_size:
            if i > 64:
                logging.debug(virt_test_utils.get_memory_info(lvms))
                raise error.TestError("SHM didn't merge the memory until "
                                      "the deadline")
            wt = ksm_size / 200 * perf_ratio
            logging.debug("Waiting %ds before proceeding...", wt)
            time.sleep(wt)
            if new_ksm:
                shm = get_ksmstat()
            else:
                shm = vm.get_shared_meminfo()
            logging.debug("Shared meminfo after attempt %s: %s", i, shm)
            i += 1

        logging.debug(virt_test_utils.get_memory_info([vm]))
        logging.info("Phase 2a: PASS")

        logging.info("Phase 2b: Simultaneous splitting")
        # Actual splitting
        for i in range(0, max_alloc):
            a_cmd = "mem.static_random_fill()"
            data = _execute_allocator(a_cmd, vm, lsessions[i],
                                      90 * perf_ratio)[1]

            data = data.splitlines()[-1]
            logging.debug(data)
            out = int(data.split()[4])
            logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s",
                          (ksm_size / max_alloc), out,
                          (ksm_size * 1000 / out / max_alloc))
        logging.debug(virt_test_utils.get_memory_info([vm]))
        logging.info("Phase 2b: PASS")

        logging.info("Phase 2c: Simultaneous verification")
        for i in range(0, max_alloc):
            a_cmd = "mem.static_random_verify()"
            data = _execute_allocator(a_cmd, vm, lsessions[i],
                                      (mem / 200 * 50 * perf_ratio))[1]
        logging.info("Phase 2c: PASS")

        logging.info("Phase 2d: Simultaneous merging")
        # Actual merging
        for i in range(0, max_alloc):
            a_cmd = "mem.value_fill(%d)" % skeys[0]
            data = _execute_allocator(a_cmd, vm, lsessions[i],
                                      120 * perf_ratio)[1]
        logging.debug(virt_test_utils.get_memory_info([vm]))
        logging.info("Phase 2d: PASS")

        logging.info("Phase 2e: Simultaneous verification")
        for i in range(0, max_alloc):
            a_cmd = "mem.value_check(%d)" % skeys[0]
            data = _execute_allocator(a_cmd, vm, lsessions[i],
                                      (mem / 200 * 50 * perf_ratio))[1]
        logging.info("Phase 2e: PASS")

        logging.info("Phase 2f: Simultaneous splitting of the last 96B")
        for i in range(0, max_alloc):
            a_cmd = "mem.static_random_fill(96)"
            data = _execute_allocator(a_cmd, vm, lsessions[i],
                                      60 * perf_ratio)[1]

            data = data.splitlines()[-1]
            out = int(data.split()[4])
            logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s",
                          ksm_size / max_alloc, out,
                          (ksm_size * 1000 / out / max_alloc))

        logging.debug(virt_test_utils.get_memory_info([vm]))
        logging.info("Phase 2f: PASS")

        logging.info("Phase 2g: Simultaneous verification of the last 96B")
        for i in range(0, max_alloc):
            a_cmd = "mem.static_random_verify(96)"
            (match, data) = _execute_allocator(a_cmd, vm, lsessions[i],
                                               (mem / 200 * 50 * perf_ratio))
        logging.debug(virt_test_utils.get_memory_info([vm]))
        logging.info("Phase 2g: PASS")

        logging.debug("Cleaning up...")
        for i in range(0, max_alloc):
            lsessions[i].cmd_output("die()", 20)
        session.close()
        vm.destroy(gracefully=False)


    # Main test code
    logging.info("Starting phase 0: Initialization")
    new_ksm = False
    if os.path.exists("/sys/kernel/mm/ksm/run"):
        utils.run("echo 50 > /sys/kernel/mm/ksm/sleep_millisecs")
        utils.run("echo 5000 > /sys/kernel/mm/ksm/pages_to_scan")
        utils.run("echo 1 > /sys/kernel/mm/ksm/run")
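        # ksmd tuning: pages_to_scan is the number of pages scanned per wake
        # and sleep_millisecs the pause between scans, so this asks the
        # daemon to scan up to 5000 pages every 50 ms (roughly 100,000
        # pages/s, ~400 MB/s with 4 KB pages); aggressive on purpose so the
        # merge phases finish within the test timeouts.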

        e_up = "/sys/kernel/mm/transparent_hugepage/enabled"
        e_rh = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
        if os.path.exists(e_up):
            utils.run("echo 'never' > %s" % e_up)
        if os.path.exists(e_rh):
            utils.run("echo 'never' > %s" % e_rh)
        new_ksm = True
    else:
        try:
            utils.run("modprobe ksm")
            utils.run("ksmctl start 5000 100")
        except error.CmdError, e:
            raise error.TestFail("Failed to load KSM: %s" % e)

    # host_reserve: mem reserve kept for the host system to run
    host_reserve = int(params.get("ksm_host_reserve", -1))
    if host_reserve == -1:
        # default host_reserve = currently used host memory
        #                        + one minimal guest (128MB)
        # later we add 64MB per additional guest
        host_reserve = ((utils.memtotal() - utils.read_from_meminfo("MemFree"))
                        / 1024 + 128)
        # using default reserve
        _host_reserve = True
    else:
        _host_reserve = False
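    # Worked example with hypothetical numbers: on a host with
    # memtotal() = 8388608 kB (8 GB) of which MemFree = 7340032 kB, the
    # default reserve is (8388608 - 7340032) / 1024 + 128 = 1152 MB.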

    # guest_reserve: mem reserve kept so the guest OS does not start
    # killing processes
    guest_reserve = int(params.get("ksm_guest_reserve", -1))
    if guest_reserve == -1:
        # default guest_reserve = minimal_system_mem(256MB)
        # later we add tmpfs overhead
        guest_reserve = 256
        # using default reserve
        _guest_reserve = True
    else:
        _guest_reserve = False

    max_vms = int(params.get("max_vms", 2))
    overcommit = float(params.get("ksm_overcommit_ratio", 2.0))
    max_alloc = int(params.get("ksm_parallel_ratio", 1))

    # vmsc: count of all used VMs
    vmsc = int(overcommit) + 1
    vmsc = max(vmsc, max_vms)
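    # For example, with the default ksm_overcommit_ratio of 2.0 and
    # max_vms of 2 this gives vmsc = max(int(2.0) + 1, 2) = 3 guests, so
    # together the guests overcommit the host by the requested ratio while
    # at least one extra VM is available for the split phases.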

    if params['ksm_mode'] == "serial":
        max_alloc = vmsc
        if _host_reserve:
            # First round of additional guest reserves
            host_reserve += vmsc * 64
            _host_reserve = vmsc

    host_mem = (int(utils.memtotal()) / 1024 - host_reserve)

    ksm_swap = False
    if params.get("ksm_swap") == "yes":
        ksm_swap = True

    # Performance ratio
    perf_ratio = params.get("ksm_perf_ratio")
    if perf_ratio:
        perf_ratio = float(perf_ratio)
    else:
        perf_ratio = 1

    if params['ksm_mode'] == "parallel":
        vmsc = 1
        overcommit = 1
        mem = host_mem
        # 32bit system adjustment
        if not params['image_name'].endswith("64"):
            logging.debug("Probably i386 guest architecture, "
                          "max allocator mem = 2G")
            # Guest can have more than 2G but
            # kvm mem + 1MB (allocator itself) can't
            if host_mem > 3100:
                mem = 3100

        if os.popen("uname -i").readline().startswith("i386"):
            logging.debug("Host is i386 architecture, max guest mem is 2G")
            # Guest system with qemu overhead (64M) can't have more than 2G
            if mem > 3100 - 64:
                mem = 3100 - 64

    else:
        # mem: Memory of the guest systems. Maximum must be less than
        # host's physical ram
        mem = int(overcommit * host_mem / vmsc)

        # 32bit system adjustment
        if not params['image_name'].endswith("64"):
            logging.debug("Probably i386 guest architecture, "
                          "max allocator mem = 2G")
            # Guest can have more than 2G but
            # kvm mem + 1MB (allocator itself) can't
            if mem - guest_reserve - 1 > 3100:
                vmsc = int(math.ceil((host_mem * overcommit) /
                                     (3100 + guest_reserve)))
                if _host_reserve:
                    host_reserve += (vmsc - _host_reserve) * 64
                    host_mem -= (vmsc - _host_reserve) * 64
                    _host_reserve = vmsc
                mem = int(math.floor(host_mem * overcommit / vmsc))

        if os.popen("uname -i").readline().startswith("i386"):
            logging.debug("Host is i386 architecture, max guest mem is 2G")
            # Guest system with qemu overhead (64M) can't have more than 2G
            if mem > 3100 - 64:
                vmsc = int(math.ceil((host_mem * overcommit) /
                                     (3100 - 64.0)))
                if _host_reserve:
                    host_reserve += (vmsc - _host_reserve) * 64
                    host_mem -= (vmsc - _host_reserve) * 64
                    _host_reserve = vmsc
                mem = int(math.floor(host_mem * overcommit / vmsc))

    # 0.055 represents OS + TMPFS additional reserve per guest ram MB
    if _guest_reserve:
        guest_reserve += math.ceil(mem * 0.055)
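    # Example of the overhead estimate above (hypothetical value): for a
    # guest with mem = 2048 MB the default reserve grows by
    # ceil(2048 * 0.055) = 113 MB, covering the guest OS plus the tmpfs
    # backing the allocator's pages.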

    swap = int(utils.read_from_meminfo("SwapTotal")) / 1024

    logging.debug("Overcommit = %f", overcommit)
    logging.debug("True overcommit = %f", (float(vmsc * mem) /
                                           float(host_mem)))
    logging.debug("Host memory = %dM", host_mem)
    logging.debug("Guest memory = %dM", mem)
    logging.debug("Using swap = %s", ksm_swap)
    logging.debug("Swap = %dM", swap)
    logging.debug("max_vms = %d", max_vms)
    logging.debug("Count of all used VMs = %d", vmsc)
    logging.debug("Performance_ratio = %f", perf_ratio)

    # Generate unique keys for random series
    skeys = []
    dkeys = []
    for i in range(0, max(vmsc, max_alloc)):
        key = random.randrange(0, 255)
        while key in skeys:
            key = random.randrange(0, 255)
        skeys.append(key)

        key = random.randrange(0, 999)
        while key in dkeys:
            key = random.randrange(0, 999)
        dkeys.append(key)
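    # skeys are single-byte static fill values and dkeys appear to
    # parameterize the per-guest random series in ksm_overcommit_guest.py;
    # the rejection sampling above keeps each list free of duplicates so no
    # two guests (or parallel allocators) fill memory with the same pattern
    # by accident.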

    logging.debug("skeys: %s", skeys)
    logging.debug("dkeys: %s", dkeys)

    lvms = []
    lsessions = []

    # As we don't know the number and memory amount of VMs in advance,
    # we need to specify and create them here
    vm_name = params.get("main_vm")
    params['mem'] = mem
    params['vms'] = vm_name
    # Associate pidfile name
    params['pid_' + vm_name] = virt_utils.generate_tmp_file_name(vm_name,
                                                                 'pid')
    if not params.get('extra_params'):
        params['extra_params'] = ' '
    params['extra_params_' + vm_name] = params.get('extra_params')
    params['extra_params_' + vm_name] += (" -pidfile %s" %
                                          (params.get('pid_' + vm_name)))
    params['extra_params'] = params.get('extra_params_' + vm_name)
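    # qemu's -pidfile option makes the VM write its PID to the generated
    # temp file; the test reads it back after login, so each params['pid_*']
    # entry ends up holding the live qemu process ID rather than a path.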

    # ksm_size: amount of memory used by allocator
    ksm_size = mem - guest_reserve
    logging.debug("Memory used by allocator on guests = %dM", ksm_size)

    # Creating the first guest
    virt_env_process.preprocess_vm(test, params, env, vm_name)
    lvms.append(env.get_vm(vm_name))
    if not lvms[0]:
        raise error.TestError("VM object not found in environment")
    if not lvms[0].is_alive():
        raise error.TestError("VM seems to be dead; Test requires a living "
                              "VM")

    logging.debug("Booting first guest %s", lvms[0].name)

    lsessions.append(lvms[0].wait_for_login(timeout=360))
    # Associate vm PID
    try:
        tmp = open(params.get('pid_' + vm_name), 'r')
        params['pid_' + vm_name] = int(tmp.readline())
    except:
        raise error.TestFail("Could not get PID of %s" % (vm_name))

    # Creating other guest systems
    for i in range(1, vmsc):
        vm_name = "vm" + str(i + 1)
        params['pid_' + vm_name] = virt_utils.generate_tmp_file_name(vm_name,
                                                                     'pid')
        params['extra_params_' + vm_name] = params.get('extra_params')
        params['extra_params_' + vm_name] += (" -pidfile %s" %
                                              (params.get('pid_' + vm_name)))
        params['extra_params'] = params.get('extra_params_' + vm_name)

        # Last VM is later used to run more allocators simultaneously
        lvms.append(lvms[0].clone(vm_name, params))
        env.register_vm(vm_name, lvms[i])
        params['vms'] += " " + vm_name

        logging.debug("Booting guest %s", lvms[i].name)
        lvms[i].create()
        if not lvms[i].is_alive():
            raise error.TestError("VM %s seems to be dead; Test requires a "
                                  "living VM" % lvms[i].name)

        lsessions.append(lvms[i].wait_for_login(timeout=360))
        try:
            tmp = open(params.get('pid_' + vm_name), 'r')
            params['pid_' + vm_name] = int(tmp.readline())
        except:
            raise error.TestFail("Could not get PID of %s" % (vm_name))

    # Let guests rest a little bit :-)
    st = vmsc * 2 * perf_ratio
    logging.debug("Waiting %ds before proceeding", st)
    time.sleep(st)
    logging.debug(virt_test_utils.get_memory_info(lvms))

    # Copy ksm_overcommit_guest.py into guests
    pwd = os.path.join(os.environ['AUTODIR'], 'tests/kvm')
    vksmd_src = os.path.join(pwd, "scripts/ksm_overcommit_guest.py")
    dst_dir = "/tmp"
    for vm in lvms:
        vm.copy_files_to(vksmd_src, dst_dir)
    logging.info("Phase 0: PASS")

    if params['ksm_mode'] == "parallel":
        logging.info("Starting KSM test parallel mode")
        split_parallel()
        logging.info("KSM test parallel mode: PASS")
    elif params['ksm_mode'] == "serial":
        logging.info("Starting KSM test serial mode")
        initialize_guests()
        separate_first_guest()
        split_guest()
        logging.info("KSM test serial mode: PASS")