
[KVM-AUTOTEST] stress_boot - Boot VMs until one of them becomes unresponsive - Version 2

Message ID: 4A3A0605.6060303@redhat.com
State: New, archived

Commit Message

Yolkfull Chow June 18, 2009, 9:16 a.m. UTC
On 06/18/2009 04:17 PM, Lucas Meneghel Rodrigues wrote:
> On Fri, 2009-06-12 at 21:27 +0800, Yolkfull Chow wrote:
>    
>> Following are the differences from version 1:
>>
>> 1) use the framework to destroy all VMs except the main_vm
>> 2) use snapshots to boot all VMs except the first one
>>
>>
>> Regards,
>> Yolkfull
>>      
> Hi Yolkfull, Michael and Uri already made a thorough first comment about
> your test, and I have a minor thing to note (and I admit I'm being picky
> here):
>
> +            # check whether all previous ssh sessions are responsive
> +            for i, vm_session in enumerate(sessions):
> +                if vm_session.get_command_status(params.get("alive_test_cmd")):
> +                    raise error.TestFail("Session #%d is not responsive" % i)
> +            num += 1
> +
> +        except (error.TestFail, OSError):
> +            for se in sessions:
> +                se.close()
> +            logging.info("Total number booted: %d" % num)
> +            raise
> +    else:
> +        for se in sessions:
> +            se.close()
> +        logging.info("Total number booted: %d" % num)
>
> When the test finishes successfully, the counter num is incremented
> one last time, breaking the while condition, and is later used to
> print the number of vms successfully booted. As a result, the total
> number of vms booted that the test reports is the actual number
> booted plus 1. To fix this we can either:
>
>   * Just subtract 1 from num at the last info logging call;
>   * Remove the num initialization and replace the while loop with a
>
> for num in range(1, int(params.get("max_vms"))):
>
> this way we don't even need to increment num manually.
>
> It's up to you which one you're going to implement. I have tested your
> code and it works fine (aside from the minor cosmetic issue). Once you
> send me an updated version, I am going to apply it.
>
> Thanks for your work!
>
>    
Hi Lucas, I also found the number counting problem after sending the
patch. I haven't been able to re-send the updated version since I've had
some other things to deal with these last few days. Sorry for that...

Please see attachment for updated version.   Thank you so much.  :)
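
For reference, here is a minimal standalone sketch of the counting issue
Lucas describes above, together with the range-based fix (boot_vm and the
max_vms value are illustrative placeholders, and the range is adjusted to
start at 2 since the first guest is booted before the loop):

    max_vms = 5

    def boot_vm(num):
        # stand-in for cloning and booting guest #num
        pass

    # While-loop pattern: num ends up one past the last guest booted,
    # so reporting num directly over-counts by one.
    num = 2
    while num <= max_vms:
        boot_vm(num)
        num += 1
    print("Total number booted: %d" % (num - 1))  # prints 5

    # For-loop pattern: the loop manages the counter, and after it
    # finishes num is bound to the last index actually booted.
    for num in range(2, max_vms + 1):
        boot_vm(num)
    print("Total number booted: %d" % num)  # also prints 5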

Comments

Lucas Meneghel Rodrigues June 19, 2009, 1:06 p.m. UTC | #1
On Thu, 2009-06-18 at 17:16 +0800, Yolkfull Chow wrote:

> Hi Lucas, I also found the number counting problem after sending the
> patch. I haven't been able to re-send the updated version since I've had
> some other things to deal with these last few days. Sorry for that...
> 
> Please see attachment for updated version.   Thank you so much.  :)

Ok, patch applied. Thank you very much for your work!



Patch

diff --git a/client/tests/kvm/kvm.py b/client/tests/kvm/kvm.py
index 9428162..43d7bbc 100644
--- a/client/tests/kvm/kvm.py
+++ b/client/tests/kvm/kvm.py
@@ -53,6 +53,7 @@  class kvm(test.test):
                 "autotest":     test_routine("kvm_tests", "run_autotest"),
                 "kvm_install":  test_routine("kvm_install", "run_kvm_install"),
                 "linux_s3":     test_routine("kvm_tests", "run_linux_s3"),
+                "stress_boot":  test_routine("kvm_tests", "run_stress_boot"),
                 }
 
         # Make it possible to import modules from the test's bindir
diff --git a/client/tests/kvm/kvm_tests.cfg.sample b/client/tests/kvm/kvm_tests.cfg.sample
index 2c0b321..7f4e9b9 100644
--- a/client/tests/kvm/kvm_tests.cfg.sample
+++ b/client/tests/kvm/kvm_tests.cfg.sample
@@ -82,6 +82,11 @@  variants:
     - linux_s3:      install setup
         type = linux_s3
 
+    - stress_boot:
+        type = stress_boot
+        max_vms = 5
+        alive_test_cmd = ps aux
+
 # NICs
 variants:
     - @rtl8139:
@@ -292,6 +297,8 @@  variants:
         password = 123456
         migrate:
             migration_test_command = ver && vol
+        stress_boot:
+            alive_test_cmd = systeminfo
 
         variants:
             - Win2000:
diff --git a/client/tests/kvm/kvm_tests.py b/client/tests/kvm/kvm_tests.py
index 4270cae..11f7bf0 100644
--- a/client/tests/kvm/kvm_tests.py
+++ b/client/tests/kvm/kvm_tests.py
@@ -474,3 +474,75 @@  def run_linux_s3(test, params, env):
     logging.info("VM resumed after S3")
 
     session.close()
+
+
+def run_stress_boot(test, params, env):
+    """
+    Boots VMs until one of them becomes unresponsive, and records the maximum
+    number of VMs successfully started:
+    1) boot the first vm
+    2) boot the second vm cloned from the first vm, check whether it boots up
+       and all booted vms can still be logged into via ssh
+    3) repeat until a VM cannot be created or memory cannot be allocated
+
+    @param test:   kvm test object
+    @param params: Dictionary with the test parameters
+    @param env:    Dictionary with test environment.
+    """
+    # boot the first vm
+    vm = kvm_utils.env_get_vm(env, params.get("main_vm"))
+
+    if not vm:
+        raise error.TestError("VM object not found in environment")
+    if not vm.is_alive():
+        raise error.TestError("VM seems to be dead; Test requires a living VM")
+
+    logging.info("Waiting for first guest to be up...")
+
+    session = kvm_utils.wait_for(vm.ssh_login, 240, 0, 2)
+    if not session:
+        raise error.TestFail("Could not log into first guest")
+
+    num = 2
+    sessions = [session]
+
+    # boot the VMs
+    while num <= int(params.get("max_vms")):
+        try:
+            vm_name = "vm" + str(num)
+
+            # clone vm according to the first one
+            vm_params = params.copy()
+            vm_params['image_snapshot'] = "yes"
+            vm_params['kill_vm'] = "yes"
+            vm_params['kill_vm_gracefully'] = "no"
+            curr_vm = vm.clone(vm_name, vm_params)
+            kvm_utils.env_register_vm(env, vm_name, curr_vm)
+            params['vms'] += " " + vm_name
+
+            logging.info("Booting guest #%d" % num)
+            if not curr_vm.create():
+                raise error.TestFail("Cannot create VM #%d" % num)
+
+            curr_vm_session = kvm_utils.wait_for(curr_vm.ssh_login, 240, 0, 2)
+            if not curr_vm_session:
+                raise error.TestFail("Could not log into guest #%d" % num)
+
+            logging.info("Guest #%d boots up successfully" % num)
+            sessions.append(curr_vm_session)
+
+            # check whether all previous ssh sessions are responsive
+            for i, vm_session in enumerate(sessions):
+                if vm_session.get_command_status(params.get("alive_test_cmd")):
+                    raise error.TestFail("Session #%d is not responsive" % i)
+            num += 1
+
+        except (error.TestFail, OSError):
+            for se in sessions:
+                se.close()
+            logging.info("Total number booted: %d" % (num - 1))
+            raise
+    else:
+        for se in sessions:
+            se.close()
+        logging.info("Total number booted: %d" % (num - 1))
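
A note on the control flow above: the cleanup code relies on Python's
while/else construct, in which the else block runs only when the loop
condition becomes false, not when an exception propagates out of the loop
body. A minimal sketch of the pattern (boot_guests and the guest strings
are placeholders, not part of the patch):

    def boot_guests(max_vms):
        num = 2
        booted = ["guest1"]  # the first guest, booted before the loop
        while num <= max_vms:
            try:
                booted.append("guest%d" % num)  # stands in for a real boot
                num += 1
            except Exception:
                # a failed boot lands here: report and re-raise, which
                # leaves the loop without running the else block
                print("Total number booted: %d" % (num - 1))
                raise
        else:
            # normal exit: the condition became false, all guests booted
            print("Total number booted: %d" % (num - 1))

    boot_guests(5)  # prints "Total number booted: 5"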