[2/3] KVM test: Add Transparent Hugepages subtests v2

Message ID 1308183516-5247-3-git-send-email-lmr@redhat.com (mailing list archive)
State New, archived

Commit Message

Lucas Meneghel Rodrigues June 16, 2011, 12:18 a.m. UTC
The transparent hugepage tests include:

1) Smoke test and stress test
The smoke test verifies that transparent hugepages are actually used by
KVM and the guest. The stress test runs parallel dd instances to
exercise the stability of transparent hugepages.

2) Swap test
Boot up a VM and verify that its memory can be swapped out and swapped
in correctly.

3) Defrag test
Allocate hugepages for libhugetlbfs before and after enabling khugepaged
defrag, then compare the results.
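
For reference, the core check behind the smoke test is a comparison of
the host's AnonHugePages counter before and after the guest dirties a
large amount of anonymous memory. A minimal host-side sketch of that
check (standalone, outside the autotest harness; the helper name is
illustrative):

    import re

    def anon_hugepages_kb():
        # Read the AnonHugePages counter (in kB) from the host's meminfo.
        for line in open('/proc/meminfo'):
            if line.startswith('AnonHugePages'):
                return int(re.split(r'\s+', line)[1])
        return 0

    before = anon_hugepages_kb()
    # ... have the guest touch a few hundred MB of anonymous memory ...
    after = anon_hugepages_kb()
    if after <= before:
        print 'guest does not appear to be backed by transparent hugepages'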

Changes from v1:
* Different paths to mount debugfs and tmpfs on
* Use of autotest API to execute commands
* Use more current guest virt API to execute commands

Signed-off-by: Yiqiao Pu <ypu@redhat.com>
Signed-off-by: Lucas Meneghel Rodrigues <lmr@redhat.com>
---
 client/tests/kvm/tests/trans_hugepage.py          |  102 +++++++++++++++++++
 client/tests/kvm/tests/trans_hugepage_defrag.py   |   89 ++++++++++++++++
 client/tests/kvm/tests/trans_hugepage_swapping.py |  110 +++++++++++++++++++++
 3 files changed, 301 insertions(+), 0 deletions(-)
 create mode 100644 client/tests/kvm/tests/trans_hugepage.py
 create mode 100644 client/tests/kvm/tests/trans_hugepage_defrag.py
 create mode 100644 client/tests/kvm/tests/trans_hugepage_swapping.py

Patch

diff --git a/client/tests/kvm/tests/trans_hugepage.py b/client/tests/kvm/tests/trans_hugepage.py
new file mode 100644
index 0000000..cbda16c
--- /dev/null
+++ b/client/tests/kvm/tests/trans_hugepage.py
@@ -0,0 +1,102 @@
+import logging, os, re
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.common_lib import utils
+from autotest_lib.client.virt import virt_test_utils
+
+
+@error.context_aware
+def run_trans_hugepage(test, params, env):
+    """
+    KVM kernel hugepages user side test:
+    1) Smoke test
+    2) Stress test
+
+    @param test: KVM test object.
+    @param params: Dictionary with test parameters.
+    @param env: Dictionary with the test environment.
+    """
+    def get_mem_status(key, location):
+        if location == "host":
+            info = utils.system_output("cat /proc/meminfo")
+        else:
+            info = session.cmd("cat /proc/meminfo")
+        for h in re.split("\n+", info):
+            if h.startswith(key):
+                output = re.split('\s+', h)[1]
+        return output
+
+
+    # Check that transparent hugepages are used by the guest
+    dd_timeout = float(params.get("dd_timeout", 900))
+    mem = int(params['mem'])
+
+    debugfs_flag = 1
+    debugfs_path = os.path.join(test.tmpdir, 'debugfs')
+    mem_path = os.path.join("/tmp", 'thp_space')
+
+    error.context("smoke test setup")
+    if not os.path.ismount(debugfs_path):
+        if not os.path.isdir(debugfs_path):
+            os.makedirs(debugfs_path)
+        utils.run("mount -t debugfs none %s" % debugfs_path)
+
+    logging.info("Smoke test start")
+    error.context("smoke test")
+    login_timeout = float(params.get("login_timeout", "3600"))
+    vm = virt_test_utils.get_living_vm(env, params.get("main_vm"))
+    session = virt_test_utils.wait_for_login(vm, timeout=login_timeout)
+
+    nr_ah_before = int(get_mem_status('AnonHugePages', 'host'))
+    if nr_ah_before <= 0:
+        raise error.TestFail("VM is not using transparent hugepages")
+
+    # Protect system from oom killer
+    if int(get_mem_status('MemFree', 'guest')) / 1024 < mem:
+        mem = int(get_mem_status('MemFree', 'guest')) / 1024
+
+    session.cmd("mkdir -p %s" % mem_path)
+
+    session.cmd("mount -t tmpfs -o size=%sM none %s" % (str(mem), mem_path))
+
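+    # With bs=4000000 (~4 MB) per block, mem / 4 blocks of dd roughly fill the tmpfs mount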
+    count = mem / 4
+    session.cmd("dd if=/dev/zero of=%s/1 bs=4000000 count=%s" %
+                (mem_path, count), timeout=dd_timeout)
+
+    nr_ah_after = int(get_mem_status('AnonHugePages', 'host'))
+
+    if nr_ah_after <= nr_ah_before:
+        logging.warning("VM did not use Transparent Hugepages during dd")
+
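+    # The kvm debugfs 'largepages' counter tracks huge pages backing guest memory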
+    if debugfs_flag == 1:
+        if int(open('%s/kvm/largepages' % debugfs_path, 'r').read()) <= 0:
+            raise error.TestFail("KVM did not use Transparent Hugepages")
+
+    logging.info("Smoke test finished")
+
+    # Use parallel dd as stress for memory
+    count = count / 3
+    logging.info("Stress test start")
+    error.context("stress test")
+    output = session.cmd("for i in `seq %s`; do dd if=/dev/zero of=%s/$i "
+                         "bs=4000000 count=1& done" % (count, mem_path),
+                         timeout=dd_timeout)
+
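+    # Tolerate up to 5% of the parallel dd instances failing with "No space"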
+    if len(re.findall("No space", output)) > count * 0.05:
+        raise error.TestFail("Too many dd instances failed in guest")
+
+    try:
+        output = session.cmd('pidof dd')
+    except Exception:
+        output = None
+
+    if output is not None:
+        for i in re.split('\n+', output):
+            session.cmd('kill -9 %s' % i)
+
+    session.cmd("umount %s" % mem_path)
+    logging.info("Stress test finished")
+
+    session.close()
diff --git a/client/tests/kvm/tests/trans_hugepage_defrag.py b/client/tests/kvm/tests/trans_hugepage_defrag.py
new file mode 100644
index 0000000..ddf8f7b
--- /dev/null
+++ b/client/tests/kvm/tests/trans_hugepage_defrag.py
@@ -0,0 +1,89 @@
+import logging, time, os, re
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.bin import utils
+from autotest_lib.client.virt import virt_test_utils, virt_test_setup
+
+
+@error.context_aware
+def run_trans_hugepage_defrag(test, params, env):
+    """
+    KVM khugepaged defrag test:
+    1) Fragment the host memory.
+    2) Allocate hugepages for libhugetlbfs before enabling khugepaged
+       defrag.
+    3) Enable khugepaged defrag and allocate hugepages again.
+    4) Fail if defrag did not increase the number of allocated hugepages.
+
+    @param test: KVM test object.
+    @param params: Dictionary with test parameters.
+    @param env: Dictionary with the test environment.
+    """
+    def get_mem_status(key):
+        for line in file('/proc/meminfo', 'r').readlines():
+            if line.startswith(key):
+                output = re.split('\s+', line)[1]
+        return output
+
+
+    def set_libhugetlbfs(number):
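+        # Request 'number' hugepages and read back how many the kernel could actually allocate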
+        f = file("/proc/sys/vm/nr_hugepages", "w+")
+        f.write(number)
+        f.flush()
+        f.seek(0)
+        ret = f.read()
+        f.close()
+        return int(ret)
+
+
+    test_config = virt_test_setup.TransparentHugePageConfig(test, params)
+    test_config.setup()
+    # Test the defrag
+    logging.info("Defrag test start")
+    login_timeout = float(params.get("login_timeout", 360))
+    vm = virt_test_utils.get_living_vm(env, params.get("main_vm"))
+    session = virt_test_utils.wait_for_login(vm, timeout=login_timeout)
+    mem_path = os.path.join("/tmp", "thp_space")
+
+    error.context("Fragmenting guest memory")
+    try:
+        if not os.path.isdir(mem_path):
+            os.makedirs(mem_path)
+        if os.system("mount -t tmpfs none %s" % mem_path):
+            raise error.TestError("Can not mount tmpfs")
+
+        # Try to fragment the memory a bit
+        cmd = ("for i in `seq 262144`; do dd if=/dev/urandom of=%s/$i "
+               "bs=4K count=1 & done" % mem_path)
+        utils.run(cmd)
+    finally:
+        utils.run("umount %s" % mem_path)
+
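+    # Request enough hugepages to span all of physical memory (MemTotal / Hugepagesize)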
+    total = int(get_mem_status('MemTotal'))
+    hugepagesize = int(get_mem_status('Hugepagesize'))
+    nr_full = str(total / hugepagesize)
+
+    error.context("activating khugepaged defrag functionality")
+    # Allocate hugepages for libhugetlbfs before and after enabling defrag,
+    # then compare the difference.
+    nr_hp_before = set_libhugetlbfs(nr_full)
+    try:
+        defrag_path = os.path.join(test_config.thp_path, 'khugepaged', 'defrag')
+        file(defrag_path, 'w').write('yes')
+    except IOError, e:
+        raise error.TestFail("Cannot enable khugepaged defrag: %s" % e)
+    # TODO: Is sleeping for an arbitrary amount of time appropriate? Aren't
+    # there better ways to do this?
+    time.sleep(1)
+    nr_hp_after = set_libhugetlbfs(nr_full)
+
+    if nr_hp_before >= nr_hp_after:
+        raise error.TestFail("There was no memory defragmentation on host: "
+                             "%s huge pages allocated before turning "
+                             "khugepaged defrag on, %s allocated after it" %
+                             (nr_hp_before, nr_hp_after))
+
+    session.close()
+    logging.info("Defrag test succeeded")
+    test_config.cleanup()
diff --git a/client/tests/kvm/tests/trans_hugepage_swapping.py b/client/tests/kvm/tests/trans_hugepage_swapping.py
new file mode 100644
index 0000000..63f1560
--- /dev/null
+++ b/client/tests/kvm/tests/trans_hugepage_swapping.py
@@ -0,0 +1,110 @@
+import logging, os, re
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.bin import utils
+from autotest_lib.client.virt import virt_utils, virt_test_utils
+from autotest_lib.client.virt import virt_test_setup, virt_env_process
+
+
+@error.context_aware
+def run_trans_hugepage_swapping(test, params, env):
+    """
+    KVM khugepage user side test:
+    1) Verify that the hugepages can be swapped in/out.
+
+    @param test: KVM test object.
+    @param params: Dictionary with test parameters.
+    @param env: Dictionary with the test environment.
+    """
+    def get_args(args_list):
+        """
+        Look up the requested meminfo fields and return their values.
+        """
+        args_list_tmp = args_list.copy()
+        for line in file('/proc/meminfo', 'r').readlines():
+            for key in args_list_tmp.keys():
+                if line.startswith("%s" % args_list_tmp[key]):
+                    args_list_tmp[key] = int(re.split('\s+', line)[1])
+        return args_list_tmp
+
+    test_config = virt_test_setup.TransparentHugePageConfig(test, params)
+    test_config.setup()
+    # Swapping test
+    logging.info("Swapping test start")
+    # Memory information parameters:
+    # @total: Total memory size
+    # @free: Free memory size
+    # @swap_size: Total swap size
+    # @swap_free: Free swap size
+    # @hugepage_size: Size of one hugepage
+    args_dict_check = {"free" : "MemFree", "swap_size" : "SwapTotal",
+                       "swap_free" : "SwapFree", "total" : "MemTotal",
+                       "hugepage_size" : "Hugepagesize",}
+    args_dict = get_args(args_dict_check)
+    swap_free = []
+    total = int(args_dict['total']) / 1024
+    free = int(args_dict['free']) / 1024
+    swap_size = int(args_dict['swap_size']) / 1024
+    swap_free.append(int(args_dict['swap_free'])/1024)
+    hugepage_size = int(args_dict['hugepage_size']) / 1024
+    dd_timeout = float(params.get("dd_timeout", 900))
+    login_timeout = float(params.get("login_timeout", 360))
+    check_cmd_timeout = float(params.get("check_cmd_timeout", 900))
+    mem_path = os.path.join(test.tmpdir, 'thp_space')
+
+    # If there is enough free swap, fill all of memory with dd
+    if swap_free[0] > (total - free):
+        count = total / hugepage_size
+        tmpfs_size = total
+    else:
+        count = free / hugepage_size
+        tmpfs_size = free
+
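+    # Without swap on the host there is nothing to exercise; skip instead of failing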
+    if swap_size <= 0:
+        raise error.TestNAError("Host does not have swap enabled")
+    session = None
+    try:
+        if not os.path.isdir(mem_path):
+            os.makedirs(mem_path)
+        utils.run("mount -t tmpfs  -o size=%sM none %s" % tmpfs_path)
+
+        # Set the memory size of vm
+        # To ignore the oom killer set it to the free swap size
+        vm = virt_test_utils.get_living_vm(env, params.get("main_vm"))
+        if int(params['mem']) > swap_free[0]:
+            vm.destroy()
+            vm_name = 'vmsw'
+            vm0 = params.get("main_vm")
+            vm0_key = virt_utils.env_get_vm(env, vm0)
+            params['vms'] = params['vms'] + " " + vm_name
+            params['mem'] = str(swap_free[0])
+            vm_key = vm0_key.clone(vm0, params)
+            virt_utils.env_register_vm(env, vm_name, vm_key)
+            virt_env_process.preprocess_vm(test, params, env, vm_name)
+            vm_key.create()
+            session = virt_utils.wait_for(vm_key.remote_login,
+                                          timeout=login_timeout)
+        else:
+            session = virt_test_utils.wait_for_login(vm, timeout=login_timeout)
+
+        error.context("making guest to swap memory")
+        cmd = ("dd if=/dev/zero of=%s/zero bs=%s000000 count=%s" %
+               (mem_path, hugepage_size, count))
+        utils.run(cmd)
+
+        args_dict = get_args(args_dict_check)
+        swap_free.append(int(args_dict['swap_free'])/1024)
+
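+        # SwapFree should have decreased if guest memory was actually swapped out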
+        if swap_free[1] - swap_free[0] >= 0:
+            raise error.TestFail("No data was swapped to memory")
+
+        # Try harder to force more guest memory to be swapped out
+        session.cmd("find / -name \"*\"", timeout=check_cmd_timeout)
+    finally:
+        if session is not None:
+            utils.run("umount %s" % mem_path)
+
+    logging.info("Swapping test succeed")
+    session.close()
+    test_config.cleanup()