[1/3] KVM test: Subtest cleanups

Message ID: 1295842941-11557-2-git-send-email-lmr@redhat.com (mailing list archive)
State: New, archived

Commit Message

Lucas Meneghel Rodrigues Jan. 24, 2011, 4:22 a.m. UTC

Patch

diff --git a/client/tests/kvm/tests/autotest.py b/client/tests/kvm/tests/autotest.py
index 54f581d..afc2e3b 100644
--- a/client/tests/kvm/tests/autotest.py
+++ b/client/tests/kvm/tests/autotest.py
@@ -1,7 +1,5 @@ 
-import os, logging
-from autotest_lib.client.common_lib import error
-from autotest_lib.client.bin import utils
-import kvm_subprocess, kvm_utils, kvm_test_utils
+import os
+import kvm_test_utils
 
 
 def run_autotest(test, params, env):
@@ -19,7 +17,6 @@  def run_autotest(test, params, env):
 
     # Collect test parameters
     timeout = int(params.get("test_timeout", 300))
-    migrate = params.get("migrate" , "no") == "yes"
     control_path = os.path.join(test.bindir, "autotest_control",
                                 params.get("test_control_file"))
     outputdir = test.outputdir
diff --git a/client/tests/kvm/tests/balloon_check.py b/client/tests/kvm/tests/balloon_check.py
index 9ed0a7e..0c2a367 100644
--- a/client/tests/kvm/tests/balloon_check.py
+++ b/client/tests/kvm/tests/balloon_check.py
@@ -1,6 +1,7 @@ 
-import re, string, logging, random, time
+import re, logging, random, time
 from autotest_lib.client.common_lib import error
-import kvm_test_utils, kvm_utils, kvm_monitor
+import kvm_monitor
+
 
 def run_balloon_check(test, params, env):
     """
diff --git a/client/tests/kvm/tests/boot.py b/client/tests/kvm/tests/boot.py
index 15b78b3..4fabcd5 100644
--- a/client/tests/kvm/tests/boot.py
+++ b/client/tests/kvm/tests/boot.py
@@ -1,6 +1,4 @@ 
-import logging, time
-from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils
+import time
 
 
 def run_boot(test, params, env):
diff --git a/client/tests/kvm/tests/boot_savevm.py b/client/tests/kvm/tests/boot_savevm.py
index 6acd0a2..6af4132 100644
--- a/client/tests/kvm/tests/boot_savevm.py
+++ b/client/tests/kvm/tests/boot_savevm.py
@@ -1,6 +1,7 @@ 
 import logging, time
 from autotest_lib.client.common_lib import error
-import kvm_test_utils, kvm_utils, kvm_monitor
+import kvm_monitor
+
 
 def run_boot_savevm(test, params, env):
     """
@@ -17,7 +18,7 @@  def run_boot_savevm(test, params, env):
     vm.verify_alive()
     savevm_delay = float(params.get("savevm_delay"))
     savevm_login_delay = float(params.get("savevm_login_delay"))
-    logging.info("savevm_delay = %f" % savevm_delay)
+    logging.info("savevm_delay = %f", savevm_delay)
     login_expire = time.time() + savevm_login_delay
     end_time = time.time() + float(params.get("savevm_timeout"))
 
diff --git a/client/tests/kvm/tests/build.py b/client/tests/kvm/tests/build.py
index 1eef7a1..cbf4aed 100644
--- a/client/tests/kvm/tests/build.py
+++ b/client/tests/kvm/tests/build.py
@@ -1,5 +1,6 @@ 
 import installer
 
+
 def run_build(test, params, env):
     """
     Installs KVM using the selected install mode. Most install methods will
@@ -17,7 +18,7 @@  def run_build(test, params, env):
         installer_object.set_install_params(test, params)
         installer_object.install()
         env.register_installer(installer_object)
-    except Exception,e:
+    except Exception, e:
         # if the build/install fails, don't allow other tests
         # to get a installer.
         msg = "KVM install failed: %s" % (e)
diff --git a/client/tests/kvm/tests/clock_getres.py b/client/tests/kvm/tests/clock_getres.py
index 5ab4d33..d1baf88 100644
--- a/client/tests/kvm/tests/clock_getres.py
+++ b/client/tests/kvm/tests/clock_getres.py
@@ -1,7 +1,6 @@ 
-import logging, time, os
+import logging, os
 from autotest_lib.client.common_lib import error
-from autotest_lib.client.common_lib import utils
-import kvm_test_utils, kvm_utils
+from autotest_lib.client.bin import utils
 
 
 def run_clock_getres(test, params, env):
@@ -35,4 +34,4 @@  def run_clock_getres(test, params, env):
     vm.copy_files_to(test_clock, base_dir)
     session.cmd(os.path.join(base_dir, t_name))
     logging.info("PASS: Guest reported appropriate clock resolution")
-    logging.info("Guest's dmesg:\n%s" % session.cmd_output("dmesg").strip())
+    logging.info("Guest's dmesg:\n%s", session.cmd_output("dmesg").strip())
diff --git a/client/tests/kvm/tests/enospc.py b/client/tests/kvm/tests/enospc.py
index 3868cc4..3c53b64 100644
--- a/client/tests/kvm/tests/enospc.py
+++ b/client/tests/kvm/tests/enospc.py
@@ -1,7 +1,7 @@ 
-import logging, commands, time, os, re
+import logging, time, re
 from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
-import kvm_test_utils, kvm_vm
+import kvm_vm
 
 
 def run_enospc(test, params, env):
@@ -58,7 +58,7 @@  def run_enospc(test, params, env):
                     logging.error(e)
             logging.info("Guest paused, extending Logical Volume size")
             try:
-                cmd_result = utils.run("lvextend -L +200M %s" % logical_volume)
+                utils.run("lvextend -L +200M %s" % logical_volume)
             except error.CmdError, e:
                 logging.debug(e.result_obj.stdout)
             vm.monitor.cmd("cont")
@@ -71,4 +71,4 @@  def run_enospc(test, params, env):
         logging.info("Guest paused %s times from %s iterations",
                      pause_n, iterations)
 
-    logging.info("Final %s" % vm.monitor.cmd("info status"))
+    logging.info("Final %s", vm.monitor.cmd("info status"))
diff --git a/client/tests/kvm/tests/ethtool.py b/client/tests/kvm/tests/ethtool.py
index 2d5b0fc..81e45d3 100644
--- a/client/tests/kvm/tests/ethtool.py
+++ b/client/tests/kvm/tests/ethtool.py
@@ -3,6 +3,7 @@  from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
 import kvm_test_utils, kvm_utils, kvm_subprocess
 
+
 def run_ethtool(test, params, env):
     """
     Test offload functions of ethernet device by ethtool
@@ -22,7 +23,7 @@  def run_ethtool(test, params, env):
         find a way to get it installed using yum/apt-get/
         whatever
     """
-    def ethtool_get(type):
+    def ethtool_get(f_type):
         feature_pattern = {
             'tx':  'tx.*checksumming',
             'rx':  'rx.*checksumming',
@@ -34,30 +35,30 @@  def run_ethtool(test, params, env):
             }
         o = session.cmd("ethtool -k %s" % ethname)
         try:
-            return re.findall("%s: (.*)" % feature_pattern.get(type), o)[0]
+            return re.findall("%s: (.*)" % feature_pattern.get(f_type), o)[0]
         except IndexError:
-            logging.debug("Could not get %s status" % type)
+            logging.debug("Could not get %s status", f_type)
 
 
-    def ethtool_set(type, status):
+    def ethtool_set(f_type, status):
         """
         Set ethernet device offload status
 
-        @param type: Offload type name
+        @param f_type: Offload type name
         @param status: New status will be changed to
         """
-        logging.info("Try to set %s %s" % (type, status))
+        logging.info("Try to set %s %s", f_type, status)
         if status not in ["off", "on"]:
             return False
-        cmd = "ethtool -K %s %s %s" % (ethname, type, status)
-        if ethtool_get(type) != status:
+        cmd = "ethtool -K %s %s %s" % (ethname, f_type, status)
+        if ethtool_get(f_type) != status:
             try:
                 session.cmd(cmd)
                 return True
             except:
                 return False
-        if ethtool_get(type) != status:
-            logging.error("Fail to set %s %s" % (type, status))
+        if ethtool_get(f_type) != status:
+            logging.error("Fail to set %s %s", f_type, status)
             return False
         return True
 
@@ -83,8 +84,7 @@  def run_ethtool(test, params, env):
         except IndexError:
             logging.error("Could not get file md5sum in guest")
             return False
-        logging.debug("md5sum: guest(%s), host(%s)" %
-                      (guest_result, host_result))
+        logging.debug("md5sum: guest(%s), host(%s)", guest_result, host_result)
         return guest_result == host_result
 
 
@@ -100,7 +100,7 @@  def run_ethtool(test, params, env):
         dd_cmd = ("dd if=/dev/urandom of=%s bs=1M count=%s" %
                   (filename, params.get("filesize")))
         failure = (False, "Failed to create file using dd, cmd: %s" % dd_cmd)
-        logging.info("Creating file in source host, cmd: %s" % dd_cmd)
+        logging.info("Creating file in source host, cmd: %s", dd_cmd)
         tcpdump_cmd = "tcpdump -lep -s 0 tcp -vv port ssh"
         if src == "guest":
             tcpdump_cmd += " and src %s" % guest_ip
@@ -122,7 +122,7 @@  def run_ethtool(test, params, env):
                                       utils.system_output("/bin/netstat -nap"))
         for i in original_tcp_ports:
             tcpdump_cmd += " and not port %s" % i
-        logging.debug("Listen by command: %s" % tcpdump_cmd)
+        logging.debug("Listen using command: %s", tcpdump_cmd)
         session2.sendline(tcpdump_cmd)
         if not kvm_utils.wait_for(
                            lambda:session.cmd_status("pgrep tcpdump") == 0, 30):
@@ -205,24 +205,24 @@  def run_ethtool(test, params, env):
     ethtool_save_params()
     success = True
     try:
-        for type in supported_features:
-            callback = test_matrix[type][0]
-            for i in test_matrix[type][2]:
+        for f_type in supported_features:
+            callback = test_matrix[f_type][0]
+            for i in test_matrix[f_type][2]:
                 if not ethtool_set(i, "off"):
-                    logging.error("Fail to disable %s" % i)
+                    logging.error("Fail to disable %s", i)
                     success = False
-            for i in [f for f in test_matrix[type][1]] + [type]:
+            for i in [f for f in test_matrix[f_type][1]] + [f_type]:
                 if not ethtool_set(i, "on"):
-                    logging.error("Fail to enable %s" % i)
+                    logging.error("Fail to enable %s", i)
                     success = False
             if not callback():
-                raise error.TestFail("Test failed, %s: on" % type)
+                raise error.TestFail("Test failed, %s: on", f_type)
 
-            if not ethtool_set(type, "off"):
-                logging.error("Fail to disable %s" % type)
+            if not ethtool_set(f_type, "off"):
+                logging.error("Fail to disable %s", f_type)
                 success = False
             if not callback(status="off"):
-                raise error.TestFail("Test failed, %s: off" % type)
+                raise error.TestFail("Test failed, %s: off", f_type)
         if not success:
             raise error.TestError("Enable/disable offload function fail")
     finally:
diff --git a/client/tests/kvm/tests/file_transfer.py b/client/tests/kvm/tests/file_transfer.py
index a192f19..fe70b37 100644
--- a/client/tests/kvm/tests/file_transfer.py
+++ b/client/tests/kvm/tests/file_transfer.py
@@ -1,7 +1,7 @@ 
-import logging, commands, re, time, os
+import logging, time, os
 from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
-import kvm_utils, kvm_test_utils
+
 
 def run_file_transfer(test, params, env):
     """
@@ -19,9 +19,9 @@  def run_file_transfer(test, params, env):
     """
     vm = env.get_vm(params["main_vm"])
     vm.verify_alive()
-    timeout=int(params.get("login_timeout", 360))
+    login_timeout = int(params.get("login_timeout", 360))
 
-    session = vm.wait_for_login(timeout=timeout)
+    session = vm.wait_for_login(timeout=login_timeout)
 
     dir_name = test.tmpdir
     transfer_timeout = int(params.get("transfer_timeout"))
diff --git a/client/tests/kvm/tests/guest_s4.py b/client/tests/kvm/tests/guest_s4.py
index 5641654..efd8e3b 100644
--- a/client/tests/kvm/tests/guest_s4.py
+++ b/client/tests/kvm/tests/guest_s4.py
@@ -1,6 +1,6 @@ 
 import logging, time
 from autotest_lib.client.common_lib import error
-import kvm_test_utils, kvm_utils
+import kvm_utils
 
 
 @error.context_aware
@@ -38,7 +38,7 @@  def run_guest_s4(test, params, env):
     error.context("making sure background program is running")
     check_s4_cmd = params.get("check_s4_cmd")
     session2.cmd(check_s4_cmd)
-    logging.info("Launched background command in guest: %s" % test_s4_cmd)
+    logging.info("Launched background command in guest: %s", test_s4_cmd)
     error.context()
     error.base_context()
 
diff --git a/client/tests/kvm/tests/guest_test.py b/client/tests/kvm/tests/guest_test.py
index 3e778e9..95c6f7f 100644
--- a/client/tests/kvm/tests/guest_test.py
+++ b/client/tests/kvm/tests/guest_test.py
@@ -1,6 +1,5 @@ 
 import os, logging
-from autotest_lib.client.common_lib import error
-import kvm_utils, kvm_test_utils
+import kvm_utils
 
 
 def run_guest_test(test, params, env):
@@ -56,7 +55,7 @@  def run_guest_test(test, params, env):
             logging.debug("Clean directory succeeded.")
 
             # then download the resource.
-            rsc_cmd = "cd %s && %s %s" %(dst_rsc_dir, download_cmd, rsc_server)
+            rsc_cmd = "cd %s && %s %s" % (dst_rsc_dir, download_cmd, rsc_server)
             session.cmd(rsc_cmd, timeout=test_timeout)
             logging.info("Download resource finished.")
         else:
diff --git a/client/tests/kvm/tests/image_copy.py b/client/tests/kvm/tests/image_copy.py
index 87bafea..8a4d74c 100644
--- a/client/tests/kvm/tests/image_copy.py
+++ b/client/tests/kvm/tests/image_copy.py
@@ -1,4 +1,4 @@ 
-import os, logging, commands
+import os, logging
 from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
 import kvm_utils
@@ -27,7 +27,6 @@  def run_image_copy(test, params, env):
                               mount_dest_dir)
 
     src = params.get('images_good')
-    mnt_cmd = 'mount %s %s -o ro' % (src, mount_dest_dir)
     image = '%s.%s' % (os.path.split(params['image_name'])[1],
                        params['image_format'])
     src_path = os.path.join(mount_dest_dir, image)
@@ -42,5 +41,5 @@  def run_image_copy(test, params, env):
     if not os.path.exists(src_path):
         raise error.TestError('Could not find %s in NFS share' % src_path)
 
-    logging.debug('Copying image %s...' % image)
+    logging.debug('Copying image %s...', image)
     utils.system(cmd)
diff --git a/client/tests/kvm/tests/iofuzz.py b/client/tests/kvm/tests/iofuzz.py
index ed22814..7189f91 100644
--- a/client/tests/kvm/tests/iofuzz.py
+++ b/client/tests/kvm/tests/iofuzz.py
@@ -1,6 +1,6 @@ 
-import logging, time, re, random
+import logging, re, random
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils
+import kvm_subprocess
 
 
 def run_iofuzz(test, params, env):
@@ -69,7 +69,7 @@  def run_iofuzz(test, params, env):
         for (op, operand) in inst_list:
             if op == "read":
                 inb(session, operand[0])
-            elif op =="write":
+            elif op == "write":
                 outb(session, operand[0], operand[1])
             else:
                 raise error.TestError("Unknown command %s" % op)
diff --git a/client/tests/kvm/tests/ioquit.py b/client/tests/kvm/tests/ioquit.py
index 80c8221..34b4fb5 100644
--- a/client/tests/kvm/tests/ioquit.py
+++ b/client/tests/kvm/tests/ioquit.py
@@ -1,6 +1,4 @@ 
 import logging, time, random
-from autotest_lib.client.common_lib import error
-import kvm_test_utils
 
 
 def run_ioquit(test, params, env):
@@ -24,7 +22,7 @@  def run_ioquit(test, params, env):
         session2.cmd(check_cmd, timeout=60)
 
         logging.info("Sleep for a while")
-        time.sleep(random.randrange(30,100))
+        time.sleep(random.randrange(30, 100))
         session2.cmd(check_cmd, timeout=60)
         logging.info("Kill the virtual machine")
         vm.process.close()
diff --git a/client/tests/kvm/tests/iozone_windows.py b/client/tests/kvm/tests/iozone_windows.py
index 508ae95..4046106 100644
--- a/client/tests/kvm/tests/iozone_windows.py
+++ b/client/tests/kvm/tests/iozone_windows.py
@@ -1,8 +1,6 @@ 
-import logging, time, os
-from autotest_lib.client.common_lib import error
+import logging, os
 from autotest_lib.client.bin import utils
 from autotest_lib.client.tests.iozone import postprocessing
-import kvm_subprocess, kvm_test_utils, kvm_utils
 
 
 def run_iozone_windows(test, params, env):
diff --git a/client/tests/kvm/tests/jumbo.py b/client/tests/kvm/tests/jumbo.py
index e20aa9f..b7f88ae 100644
--- a/client/tests/kvm/tests/jumbo.py
+++ b/client/tests/kvm/tests/jumbo.py
@@ -3,6 +3,7 @@  from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
 import kvm_test_utils, kvm_utils
 
+
 def run_jumbo(test, params, env):
     """
     Test the RX jumbo frame function of vnics:
@@ -88,7 +89,7 @@  def run_jumbo(test, params, env):
         def size_increase_ping(step=random.randrange(90, 110)):
             logging.info("Size increase ping")
             for size in range(0, max_icmp_pkt_size + 1, step):
-                logging.info("Ping %s with size %s" % (ip, size))
+                logging.info("Ping %s with size %s", ip, size)
                 s, o = kvm_test_utils.ping(ip, 1, interface=ifname,
                                            packetsize=size,
                                            hint="do", timeout=1)
diff --git a/client/tests/kvm/tests/kdump.py b/client/tests/kvm/tests/kdump.py
index 70217ad..c847131 100644
--- a/client/tests/kvm/tests/kdump.py
+++ b/client/tests/kvm/tests/kdump.py
@@ -1,6 +1,6 @@ 
-import logging, time
+import logging
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils
+import kvm_utils
 
 
 def run_kdump(test, params, env):
diff --git a/client/tests/kvm/tests/ksm_overcommit.py b/client/tests/kvm/tests/ksm_overcommit.py
index 76ad3b0..5aba25a 100644
--- a/client/tests/kvm/tests/ksm_overcommit.py
+++ b/client/tests/kvm/tests/ksm_overcommit.py
@@ -1,4 +1,4 @@ 
-import logging, time, random, string, math, os, tempfile
+import logging, time, random, math, os
 from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
 import kvm_subprocess, kvm_test_utils, kvm_utils, kvm_preprocessing
@@ -28,9 +28,7 @@  def run_ksm_overcommit(test, params, env):
         logging.debug("Starting ksm_overcommit_guest.py on guest %s", vm.name)
         session.sendline("python /tmp/ksm_overcommit_guest.py")
         try:
-            (match, data) = session.read_until_last_line_matches(
-                                                            ["PASS:", "FAIL:"],
-                                                            timeout)
+            session.read_until_last_line_matches(["PASS:", "FAIL:"], timeout)
         except kvm_subprocess.ExpectProcessTerminatedError, e:
             e_msg = ("Command ksm_overcommit_guest.py on vm '%s' failed: %s" %
                      (vm.name, str(e)))
@@ -84,7 +82,7 @@  def run_ksm_overcommit(test, params, env):
         for session in lsessions:
             vm = lvms[lsessions.index(session)]
 
-            logging.debug("Turning off swap on vm %s" % vm.name)
+            logging.debug("Turning off swap on vm %s", vm.name)
             session.cmd("swapoff -a", timeout=300)
 
             # Start the allocator
@@ -113,7 +111,7 @@  def run_ksm_overcommit(test, params, env):
                     raise error.TestError("SHM didn't merge the memory until "
                                           "the DL on guest: %s" % vm.name)
                 st = ksm_size / 200 * perf_ratio
-                logging.debug("Waiting %ds before proceeding..." % st)
+                logging.debug("Waiting %ds before proceeding...", st)
                 time.sleep(st)
                 if (new_ksm):
                     shm = get_ksmstat()
@@ -139,8 +137,8 @@  def run_ksm_overcommit(test, params, env):
         logging.info("Phase 2: Split the pages on the first guest")
 
         a_cmd = "mem.static_random_fill()"
-        (match, data) = _execute_allocator(a_cmd, lvms[0], lsessions[0],
-                                           120 * perf_ratio)
+        data = _execute_allocator(a_cmd, lvms[0], lsessions[0],
+                                  120 * perf_ratio)[1]
 
         r_msg = data.splitlines()[-1]
         logging.debug("Return message of static_random_fill: %s", r_msg)
@@ -170,8 +168,8 @@  def run_ksm_overcommit(test, params, env):
             vm = lvms[i]
             session = lsessions[i]
             a_cmd = "mem.static_random_fill()"
-            logging.debug("Executing %s on ksm_overcommit_guest.py loop, vm: %s",
-                          a_cmd, vm.name)
+            logging.debug("Executing %s on ksm_overcommit_guest.py loop, "
+                          "vm: %s", a_cmd, vm.name)
             session.sendline(a_cmd)
 
             out = ""
@@ -187,7 +185,7 @@  def run_ksm_overcommit(test, params, env):
                     if (ksm_swap):
                         free_mem = (free_mem +
                                     int(utils.read_from_meminfo("SwapFree")))
-                    logging.debug("Free memory on host: %d" % (free_mem))
+                    logging.debug("Free memory on host: %d", free_mem)
 
                     # We need to keep some memory for python to run.
                     if (free_mem < 64000) or (ksm_swap and
@@ -197,15 +195,15 @@  def run_ksm_overcommit(test, params, env):
                             lvms[j].destroy(gracefully = False)
                         time.sleep(20)
                         vm.monitor.cmd("c")
-                        logging.debug("Only %s free memory, killing %d guests" %
-                                      (free_mem, (i-1)))
+                        logging.debug("Only %s free memory, killing %d guests",
+                                      free_mem, (i - 1))
                         last_vm = i
                         break
                     out = session.read_nonblocking(0.1)
                     time.sleep(2)
-            except OSError, (err):
-                logging.debug("Only %s host free memory, killing %d guests" %
-                              (free_mem, (i - 1)))
+            except OSError:
+                logging.debug("Only %s host free memory, killing %d guests",
+                              free_mem, (i - 1))
                 logging.debug("Stopping %s", vm.name)
                 vm.monitor.cmd("stop")
                 for j in range(0, i):
@@ -217,7 +215,7 @@  def run_ksm_overcommit(test, params, env):
 
             if last_vm != 0:
                 break
-            logging.debug("Memory filled for guest %s" % (vm.name))
+            logging.debug("Memory filled for guest %s", vm.name)
 
         logging.info("Phase 3a: PASS")
 
@@ -226,7 +224,7 @@  def run_ksm_overcommit(test, params, env):
             lsessions[i].close()
             if i == (vmsc - 1):
                 logging.debug(kvm_test_utils.get_memory_info([lvms[i]]))
-            logging.debug("Destroying guest %s" % lvms[i].name)
+            logging.debug("Destroying guest %s", lvms[i].name)
             lvms[i].destroy(gracefully = False)
 
         # Verify last machine with randomly generated memory
@@ -262,8 +260,8 @@  def run_ksm_overcommit(test, params, env):
         logging.info("Phase 1: PASS")
 
         logging.info("Phase 2a: Simultaneous merging")
-        logging.debug("Memory used by allocator on guests = %dMB" %
-                     (ksm_size / max_alloc))
+        logging.debug("Memory used by allocator on guests = %dMB",
+                      (ksm_size / max_alloc))
 
         for i in range(0, max_alloc):
             a_cmd = "mem = MemFill(%d, %s, %s)" % ((ksm_size / max_alloc),
@@ -298,46 +296,46 @@  def run_ksm_overcommit(test, params, env):
         # Actual splitting
         for i in range(0, max_alloc):
             a_cmd = "mem.static_random_fill()"
-            (match, data) = _execute_allocator(a_cmd, vm, lsessions[i],
-                                               90 * perf_ratio)
+            data = _execute_allocator(a_cmd, vm, lsessions[i],
+                                      90 * perf_ratio)[1]
 
             data = data.splitlines()[-1]
             logging.debug(data)
             out = int(data.split()[4])
-            logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s" %
-                         ((ksm_size / max_alloc), out,
-                          (ksm_size * 1000 / out / max_alloc)))
+            logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s",
+                          (ksm_size / max_alloc), out,
+                          (ksm_size * 1000 / out / max_alloc))
         logging.debug(kvm_test_utils.get_memory_info([vm]))
         logging.info("Phase 2b: PASS")
 
         logging.info("Phase 2c: Simultaneous verification")
         for i in range(0, max_alloc):
             a_cmd = "mem.static_random_verify()"
-            (match, data) = _execute_allocator(a_cmd, vm, lsessions[i],
-                                               (mem / 200 * 50 * perf_ratio))
+            data = _execute_allocator(a_cmd, vm, lsessions[i],
+                                      (mem / 200 * 50 * perf_ratio))[1]
         logging.info("Phase 2c: PASS")
 
         logging.info("Phase 2d: Simultaneous merging")
         # Actual splitting
         for i in range(0, max_alloc):
             a_cmd = "mem.value_fill(%d)" % skeys[0]
-            (match, data) = _execute_allocator(a_cmd, vm, lsessions[i],
-                                               120 * perf_ratio)
+            data = _execute_allocator(a_cmd, vm, lsessions[i],
+                                      120 * perf_ratio)[1]
         logging.debug(kvm_test_utils.get_memory_info([vm]))
         logging.info("Phase 2d: PASS")
 
         logging.info("Phase 2e: Simultaneous verification")
         for i in range(0, max_alloc):
             a_cmd = "mem.value_check(%d)" % skeys[0]
-            (match, data) = _execute_allocator(a_cmd, vm, lsessions[i],
-                                               (mem / 200 * 50 * perf_ratio))
+            data = _execute_allocator(a_cmd, vm, lsessions[i],
+                                      (mem / 200 * 50 * perf_ratio))[1]
         logging.info("Phase 2e: PASS")
 
         logging.info("Phase 2f: Simultaneous spliting last 96B")
         for i in range(0, max_alloc):
             a_cmd = "mem.static_random_fill(96)"
-            (match, data) = _execute_allocator(a_cmd, vm, lsessions[i],
-                                               60 * perf_ratio)
+            data = _execute_allocator(a_cmd, vm, lsessions[i],
+                                      60 * perf_ratio)[1]
 
             data = data.splitlines()[-1]
             out = int(data.split()[4])
@@ -371,10 +369,12 @@  def run_ksm_overcommit(test, params, env):
         utils.run("echo 5000 > /sys/kernel/mm/ksm/pages_to_scan")
         utils.run("echo 1 > /sys/kernel/mm/ksm/run")
 
-        if (os.path.exists("/sys/kernel/mm/transparent_hugepage/enabled")):
-            utils.run("echo 'never' > /sys/kernel/mm/transparent_hugepage/enabled ")
-        if (os.path.exists("/sys/kernel/mm/redhat_transparent_hugepage/enabled")):
-            utils.run("echo 'never' > /sys/kernel/mm/redhat_transparent_hugepage/enabled ")
+        e_up = "/sys/kernel/mm/transparent_hugepage/enabled"
+        e_rh = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
+        if os.path.exists(e_up):
+            utils.run("echo 'never' > %s" % e_up)
+        if os.path.exists(e_rh):
+            utils.run("echo 'never' > %s" % e_rh)
         new_ksm = True
     else:
         try:
@@ -516,14 +516,14 @@  def run_ksm_overcommit(test, params, env):
             key = random.randrange(0, 999)
         dkeys.append(key)
 
-    logging.debug("skeys: %s" % skeys)
-    logging.debug("dkeys: %s" % dkeys)
+    logging.debug("skeys: %s", skeys)
+    logging.debug("dkeys: %s", dkeys)
 
     lvms = []
     lsessions = []
 
     # As we don't know the number and memory amount of VMs in advance,
-    # we need to specify and create them here (FIXME: not a nice thing)
+    # we need to specify and create them here
     vm_name = params.get("main_vm")
     params['mem'] = mem
     params['vms'] = vm_name
@@ -539,7 +539,7 @@  def run_ksm_overcommit(test, params, env):
 
     # ksm_size: amount of memory used by allocator
     ksm_size = mem - guest_reserve
-    logging.debug("Memory used by allocator on guests = %dM" % (ksm_size))
+    logging.debug("Memory used by allocator on guests = %dM", ksm_size)
 
     # Creating the first guest
     kvm_preprocessing.preprocess_vm(test, params, env, vm_name)
@@ -575,7 +575,7 @@  def run_ksm_overcommit(test, params, env):
         env.register_vm(vm_name, lvms[i])
         params['vms'] += " " + vm_name
 
-        logging.debug("Booting guest %s" % lvms[i].name)
+        logging.debug("Booting guest %s", lvms[i].name)
         lvms[i].create()
         if not lvms[i].is_alive():
             raise error.TestError("VM %s seems to be dead; Test requires a"
diff --git a/client/tests/kvm/tests/linux_s3.py b/client/tests/kvm/tests/linux_s3.py
index ece8676..5a04fca 100644
--- a/client/tests/kvm/tests/linux_s3.py
+++ b/client/tests/kvm/tests/linux_s3.py
@@ -1,6 +1,5 @@ 
 import logging, time
 from autotest_lib.client.common_lib import error
-import kvm_test_utils
 
 
 def run_linux_s3(test, params, env):
@@ -23,8 +22,8 @@  def run_linux_s3(test, params, env):
     time.sleep(10)
 
     src_tty = session.cmd_output("fgconsole").strip()
-    logging.info("Current virtual terminal is %s" % src_tty)
-    if src_tty not in map(str, range(1,10)):
+    logging.info("Current virtual terminal is %s", src_tty)
+    if src_tty not in map(str, range(1, 10)):
         raise error.TestFail("Got a strange current vt (%s)" % src_tty)
 
     dst_tty = "1"
diff --git a/client/tests/kvm/tests/migration.py b/client/tests/kvm/tests/migration.py
index d6dc1b0..b462e66 100644
--- a/client/tests/kvm/tests/migration.py
+++ b/client/tests/kvm/tests/migration.py
@@ -1,6 +1,6 @@ 
 import logging, time
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils
+import kvm_utils
 
 
 def run_migration(test, params, env):
@@ -66,7 +66,7 @@  def run_migration(test, params, env):
         if output != reference_output:
             logging.info("Command output before migration differs from "
                          "command output after migration")
-            logging.info("Command: %s" % test_command)
+            logging.info("Command: %s", test_command)
             logging.info("Output before:" +
                          kvm_utils.format_str_for_message(reference_output))
             logging.info("Output after:" +
diff --git a/client/tests/kvm/tests/migration_multi_host.py b/client/tests/kvm/tests/migration_multi_host.py
index 7647af4..30e3ecc 100644
--- a/client/tests/kvm/tests/migration_multi_host.py
+++ b/client/tests/kvm/tests/migration_multi_host.py
@@ -1,6 +1,5 @@ 
-import logging, time, socket
+import logging, socket
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils
 
 
 def run_migration_multi_host(test, params, env):
@@ -57,10 +56,10 @@  def run_migration_multi_host(test, params, env):
         s_socket.listen(1)
 
         # Wait 30 seconds for source and dest to reach this point
-        test.job.barrier(srchost,'socket_started', 30).rendezvous(srchost,
-                                                                  dsthost)
+        test.job.barrier(srchost, 'socket_started', 30).rendezvous(srchost,
+                                                                   dsthost)
 
-        c_socket, addr = s_socket.accept()
+        c_socket = s_socket.accept()[0]
         mig_port = int(c_socket.recv(6))
         logging.info("Received from destination the migration port %s",
                      mig_port)
diff --git a/client/tests/kvm/tests/migration_with_file_transfer.py b/client/tests/kvm/tests/migration_with_file_transfer.py
index 288e748..044c0c8 100644
--- a/client/tests/kvm/tests/migration_with_file_transfer.py
+++ b/client/tests/kvm/tests/migration_with_file_transfer.py
@@ -1,7 +1,7 @@ 
 import logging, time, os
 from autotest_lib.client.common_lib import utils, error
 from autotest_lib.client.bin import utils as client_utils
-import kvm_subprocess, kvm_test_utils, kvm_utils
+import kvm_utils
 
 
 @error.context_aware
diff --git a/client/tests/kvm/tests/migration_with_reboot.py b/client/tests/kvm/tests/migration_with_reboot.py
index b806b3c..a15f983 100644
--- a/client/tests/kvm/tests/migration_with_reboot.py
+++ b/client/tests/kvm/tests/migration_with_reboot.py
@@ -1,7 +1,4 @@ 
-import logging, time
-import threading
-from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_utils, kvm_test_utils
+import kvm_utils
 
 
 def run_migration_with_reboot(test, params, env):
diff --git a/client/tests/kvm/tests/netperf.py b/client/tests/kvm/tests/netperf.py
index 819562a..e1153e1 100644
--- a/client/tests/kvm/tests/netperf.py
+++ b/client/tests/kvm/tests/netperf.py
@@ -1,7 +1,8 @@ 
-import logging, commands, os
+import logging, os
 from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
-import kvm_test_utils, kvm_subprocess
+import kvm_subprocess
+
 
 def run_netperf(test, params, env):
     """
diff --git a/client/tests/kvm/tests/nic_bonding.py b/client/tests/kvm/tests/nic_bonding.py
index 52ce0ae..edbf916 100644
--- a/client/tests/kvm/tests/nic_bonding.py
+++ b/client/tests/kvm/tests/nic_bonding.py
@@ -1,7 +1,7 @@ 
 import logging, time, threading
-from autotest_lib.client.common_lib import error
 from autotest_lib.client.tests.kvm.tests import file_transfer
-import kvm_test_utils, kvm_utils
+import kvm_utils
+
 
 def run_nic_bonding(test, params, env):
     """
@@ -34,7 +34,8 @@  def run_nic_bonding(test, params, env):
     vm = env.get_vm(params["main_vm"])
     vm.verify_alive()
     session_serial = vm.wait_for_serial_login(timeout=timeout)
-    script_path = kvm_utils.get_path(test.bindir, "scripts/nic_bonding_guest.py")
+    script_path = kvm_utils.get_path(test.bindir,
+                                     "scripts/nic_bonding_guest.py")
     vm.copy_files_to(script_path, "/tmp/nic_bonding_guest.py")
     cmd = "python /tmp/nic_bonding_guest.py %s" % vm.get_mac_address()
     session_serial.cmd(cmd)
diff --git a/client/tests/kvm/tests/nic_hotplug.py b/client/tests/kvm/tests/nic_hotplug.py
index edfa980..50a3ce9 100644
--- a/client/tests/kvm/tests/nic_hotplug.py
+++ b/client/tests/kvm/tests/nic_hotplug.py
@@ -1,6 +1,6 @@ 
-import logging, os, commands, re, time
+import logging
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils, kvm_vm
+import kvm_test_utils, kvm_utils
 
 
 def run_nic_hotplug(test, params, env):
@@ -39,7 +39,7 @@  def run_nic_hotplug(test, params, env):
         netdev_extra_params = params.get("netdev_extra_params")
         if netdev_extra_params:
             attach_cmd += ",%s" % netdev_extra_params
-        logging.info("Adding netdev through %s" % attach_cmd)
+        logging.info("Adding netdev through %s", attach_cmd)
         vm.monitor.cmd(attach_cmd)
 
         network = vm.monitor.info("network")
@@ -49,13 +49,13 @@  def run_nic_hotplug(test, params, env):
         else:
             return netdev_id
 
-    def netdev_del(vm, id):
-        vm.monitor.cmd("netdev_del %s" % id)
+    def netdev_del(vm, n_id):
+        vm.monitor.cmd("netdev_del %s" % n_id)
 
         network = vm.monitor.info("network")
-        if id in network:
+        if n_id in network:
             logging.error(network)
-            raise error.TestError("Fail to remove netdev %s" % id)
+            raise error.TestError("Fail to remove netdev %s" % n_id)
 
     def nic_add(vm, model, netdev_id, mac):
         """
@@ -66,23 +66,24 @@  def run_nic_hotplug(test, params, env):
         @netdev_id: id of netdev
         @mac: Mac address of new nic
         """
-        id = kvm_utils.generate_random_id()
-        if model=="virtio": model="virtio-net-pci"
+        nic_id = kvm_utils.generate_random_id()
+        if model == "virtio":
+            model = "virtio-net-pci"
         device_add_cmd = "device_add %s,netdev=%s,mac=%s,id=%s" % (model,
                                                                    netdev_id,
-                                                                   mac, id)
-        logging.info("Adding nic through %s" % device_add_cmd)
+                                                                   mac, nic_id)
+        logging.info("Adding nic through %s", device_add_cmd)
         vm.monitor.cmd(device_add_cmd)
 
         qdev = vm.monitor.info("qtree")
-        if id not in qdev:
+        if nic_id not in qdev:
             logging.error(qdev)
             raise error.TestFail("Device %s was not plugged into qdev"
-                                 "tree" % id)
+                                 "tree" % nic_id)
         else:
-            return id
+            return nic_id
 
-    def nic_del(vm, id, wait=True):
+    def nic_del(vm, nic_id, wait=True):
         """
         Remove the nic from pci tree.
 
@@ -90,17 +91,17 @@  def run_nic_hotplug(test, params, env):
         @id: the nic id
         @wait: Whether need to wait for the guest to unplug the device
         """
-        nic_del_cmd = "device_del %s" % id
+        nic_del_cmd = "device_del %s" % nic_id
         vm.monitor.cmd(nic_del_cmd)
         if wait:
             logging.info("waiting for the guest to finish the unplug")
-            if not kvm_utils.wait_for(lambda: id not in
+            if not kvm_utils.wait_for(lambda: nic_id not in
                                       vm.monitor.info("qtree"),
                                       guest_delay, 5 ,1):
                 logging.error(vm.monitor.info("qtree"))
                 raise error.TestError("Device is not unplugged by "
                                       "guest, please check whether the "
-                                      "hotplug module was loaded in guest");
+                                      "hotplug module was loaded in guest")
 
     logging.info("Attach a virtio nic to vm")
     mac = kvm_utils.generate_mac_address(vm.instance, 1)
@@ -125,7 +126,7 @@  def run_nic_hotplug(test, params, env):
         if not kvm_utils.verify_ip_address_ownership(ip, mac):
             raise error.TestFail("Could not verify the ip address of new nic")
         else:
-            logging.info("Got the ip address of new nic: %s" % ip)
+            logging.info("Got the ip address of new nic: %s", ip)
 
         logging.info("Ping test the new nic ...")
         s, o = kvm_test_utils.ping(ip, 100)
@@ -135,7 +136,7 @@  def run_nic_hotplug(test, params, env):
 
         logging.info("Detaching a virtio nic from vm")
         nic_del(vm, device_id)
-        netdev_del(vm,netdev_id)
+        netdev_del(vm, netdev_id)
 
     finally:
         vm.free_mac_address(1)
diff --git a/client/tests/kvm/tests/nic_promisc.py b/client/tests/kvm/tests/nic_promisc.py
index b9a52ff..c6d70b6 100644
--- a/client/tests/kvm/tests/nic_promisc.py
+++ b/client/tests/kvm/tests/nic_promisc.py
@@ -3,6 +3,7 @@  from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
 import kvm_utils, kvm_test_utils
 
+
 def run_nic_promisc(test, params, env):
     """
     Test nic driver in promisc mode:
@@ -66,7 +67,7 @@  def run_nic_promisc(test, params, env):
     success_counter = 0
     try:
         for size in file_size:
-            logging.info("Create %s bytes file on host" % size)
+            logging.info("Create %s bytes file on host", size)
             utils.run(dd_cmd % (filename, int(size)))
 
             logging.info("Transfer file from host to guest")
@@ -81,7 +82,7 @@  def run_nic_promisc(test, params, env):
             else:
                 success_counter += 1
 
-            logging.info("Create %s bytes file on guest" % size)
+            logging.info("Create %s bytes file on guest", size)
             session.cmd(dd_cmd % (filename, int(size)), timeout=100)
 
             logging.info("Transfer file from guest to host")
diff --git a/client/tests/kvm/tests/nicdriver_unload.py b/client/tests/kvm/tests/nicdriver_unload.py
index d4ddfdd..065c60e 100644
--- a/client/tests/kvm/tests/nicdriver_unload.py
+++ b/client/tests/kvm/tests/nicdriver_unload.py
@@ -1,7 +1,8 @@ 
 import logging, threading, os
 from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
-import kvm_utils, kvm_test_utils
+import kvm_test_utils
+
 
 def run_nicdriver_unload(test, params, env):
     """
diff --git a/client/tests/kvm/tests/pci_hotplug.py b/client/tests/kvm/tests/pci_hotplug.py
index 891c48d..4bb8bfb 100644
--- a/client/tests/kvm/tests/pci_hotplug.py
+++ b/client/tests/kvm/tests/pci_hotplug.py
@@ -1,6 +1,6 @@ 
-import logging, os, commands, re
+import re
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils, kvm_vm
+import kvm_subprocess, kvm_utils, kvm_vm
 
 
 def run_pci_hotplug(test, params, env):
@@ -80,11 +80,12 @@  def run_pci_hotplug(test, params, env):
 
     elif cmd_type == "device_add":
         driver_id = test_type + "-" + kvm_utils.generate_random_id()
-        id = test_type + "-" + kvm_utils.generate_random_id()
+        device_id = test_type + "-" + kvm_utils.generate_random_id()
         if test_type == "nic":
             if tested_model == "virtio":
                 tested_model = "virtio-net-pci"
-            pci_add_cmd = "device_add id=%s,driver=%s" % (id, tested_model)
+            pci_add_cmd = "device_add id=%s,driver=%s" % (device_id,
+                                                          tested_model)
 
         elif test_type == "block":
             image_params = params.object_params("stg")
@@ -103,13 +104,14 @@  def run_pci_hotplug(test, params, env):
                                           controller_model)
 
             if controller_model is not None:
-                controller_id = "controller-" + id
+                controller_id = "controller-" + device_id
                 controller_add_cmd = ("device_add %s,id=%s" %
                                       (controller_model, controller_id))
-                controller_output = vm.monitor.cmd(controller_add_cmd)
+                vm.monitor.cmd(controller_add_cmd)
 
             if drive_cmd_type == "drive_add":
-                driver_add_cmd = ("drive_add auto file=%s,if=none,id=%s,format=%s" %
+                driver_add_cmd = ("drive_add auto "
+                                  "file=%s,if=none,id=%s,format=%s" %
                                   (image_filename, driver_id, image_format))
             elif drive_cmd_type == "__com.redhat_drive_add":
                 driver_add_cmd = ("__com.redhat_drive_add "
@@ -117,8 +119,8 @@  def run_pci_hotplug(test, params, env):
                                   (image_filename, image_format, driver_id))
 
             pci_add_cmd = ("device_add id=%s,driver=%s,drive=%s" %
-                           (id, tested_model, driver_id))
-            driver_output = vm.monitor.cmd(driver_add_cmd)
+                           (device_id, tested_model, driver_id))
+            vm.monitor.cmd(driver_add_cmd)
 
         # Check if the device is support in qemu
         if len(re.findall(tested_model, devices_support)) > 0:
@@ -142,7 +144,7 @@  def run_pci_hotplug(test, params, env):
             pci_addr = "%x:%x:%x" % (domain, bus, slot)
             cmd = "pci_del pci_addr=%s" % pci_addr
         elif cmd_type == "device_add":
-            cmd = "device_del %s" % id
+            cmd = "device_del %s" % device_id
         # This should be replaced by a proper monitor method call
         vm.monitor.cmd(cmd)
 
diff --git a/client/tests/kvm/tests/physical_resources_check.py b/client/tests/kvm/tests/physical_resources_check.py
index 5358a64..f9e603c 100644
--- a/client/tests/kvm/tests/physical_resources_check.py
+++ b/client/tests/kvm/tests/physical_resources_check.py
@@ -1,6 +1,6 @@ 
 import re, string, logging
 from autotest_lib.client.common_lib import error
-import kvm_test_utils, kvm_utils, kvm_monitor
+import kvm_monitor
 
 
 def run_physical_resources_check(test, params, env):
@@ -36,8 +36,8 @@  def run_physical_resources_check(test, params, env):
     if expected_cpu_nr != actual_cpu_nr:
         n_fail += 1
         logging.error("CPU count mismatch:")
-        logging.error("    Assigned to VM: %s" % expected_cpu_nr)
-        logging.error("    Reported by OS: %s" % actual_cpu_nr)
+        logging.error("    Assigned to VM: %s", expected_cpu_nr)
+        logging.error("    Reported by OS: %s", actual_cpu_nr)
 
     # Check memory size
     logging.info("Memory size check")
@@ -46,8 +46,8 @@  def run_physical_resources_check(test, params, env):
     if actual_mem != expected_mem:
         n_fail += 1
         logging.error("Memory size mismatch:")
-        logging.error("    Assigned to VM: %s" % expected_mem)
-        logging.error("    Reported by OS: %s" % actual_mem)
+        logging.error("    Assigned to VM: %s", expected_mem)
+        logging.error("    Reported by OS: %s", actual_mem)
 
     # Define a function for checking number of hard drivers & NICs
     def check_num(devices, info_cmd, check_str):
@@ -65,20 +65,18 @@  def run_physical_resources_check(test, params, env):
         if expected_num != actual_num:
             f_fail += 1
             logging.error("%s number mismatch:")
-            logging.error("    Assigned to VM: %d" % expected_num)
-            logging.error("    Reported by OS: %d" % actual_num)
+            logging.error("    Assigned to VM: %d", expected_num)
+            logging.error("    Reported by OS: %d", actual_num)
         return expected_num, f_fail
 
     logging.info("Hard drive count check")
-    drives_num, f_fail = check_num("images", "block", "type=hd")
-    n_fail += f_fail
+    n_fail += check_num("images", "block", "type=hd")[1]
 
     logging.info("NIC count check")
-    nics_num, f_fail = check_num("nics", "network", "model=")
-    n_fail += f_fail
+    n_fail += check_num("nics", "network", "model=")[1]
 
     # Define a function for checking hard drives & NICs' model
-    def chk_fmt_model(device, fmt_model, info_cmd, str):
+    def chk_fmt_model(device, fmt_model, info_cmd, regexp):
         f_fail = 0
         devices = params.objects(device)
         for chk_device in devices:
@@ -94,8 +92,8 @@  def run_physical_resources_check(test, params, env):
                 logging.error("info/query monitor command failed (%s)",
                               info_cmd)
 
-            device_found = re.findall(str, o)
-            logging.debug("Found devices: %s" % device_found)
+            device_found = re.findall(regexp, o)
+            logging.debug("Found devices: %s", device_found)
             found = False
             for fm in device_found:
                 if expected in fm:
@@ -104,8 +102,8 @@  def run_physical_resources_check(test, params, env):
             if not found:
                 f_fail += 1
                 logging.error("%s model mismatch:")
-                logging.error("    Assigned to VM: %s" % expected)
-                logging.error("    Reported by OS: %s" % device_found)
+                logging.error("    Assigned to VM: %s", expected)
+                logging.error("    Reported by OS: %s", device_found)
         return f_fail
 
     logging.info("NICs model check")
@@ -125,7 +123,7 @@  def run_physical_resources_check(test, params, env):
         logging.error(e)
         logging.error("info/query monitor command failed (network)")
     found_mac_addresses = re.findall("macaddr=(\S+)", o)
-    logging.debug("Found MAC adresses: %s" % found_mac_addresses)
+    logging.debug("Found MAC adresses: %s", found_mac_addresses)
 
     num_nics = len(params.objects("nics"))
     for nic_index in range(num_nics):
@@ -133,7 +131,7 @@  def run_physical_resources_check(test, params, env):
         if not string.lower(mac) in found_mac_addresses:
             n_fail += 1
             logging.error("MAC address mismatch:")
-            logging.error("    Assigned to VM (not found): %s" % mac)
+            logging.error("    Assigned to VM (not found): %s", mac)
 
     # Define a function to verify UUID & Serial number
     def verify_device(expect, name, verify_cmd):
@@ -143,8 +141,8 @@  def run_physical_resources_check(test, params, env):
             if not string.upper(expect) in actual:
                 f_fail += 1
                 logging.error("%s mismatch:")
-                logging.error("    Assigned to VM: %s" % string.upper(expect))
-                logging.error("    Reported by OS: %s" % actual)
+                logging.error("    Assigned to VM: %s", string.upper(expect))
+                logging.error("    Reported by OS: %s", actual)
         return f_fail
 
     logging.info("UUID check")
diff --git a/client/tests/kvm/tests/ping.py b/client/tests/kvm/tests/ping.py
index 0dbbdf6..8dc4b9e 100644
--- a/client/tests/kvm/tests/ping.py
+++ b/client/tests/kvm/tests/ping.py
@@ -34,7 +34,8 @@  def run_ping(test, params, env):
         for i, nic in enumerate(nics):
             ip = vm.get_address(i)
             if not ip:
-                logging.error("Could not get the ip of nic index %d", i)
+                logging.error("Could not get the ip of nic index %d: %s",
+                              i, nic)
                 continue
 
             for size in packet_size:
diff --git a/client/tests/kvm/tests/pxe.py b/client/tests/kvm/tests/pxe.py
index 026e397..7c294c1 100644
--- a/client/tests/kvm/tests/pxe.py
+++ b/client/tests/kvm/tests/pxe.py
@@ -1,6 +1,6 @@ 
 import logging
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils
+import kvm_subprocess
 
 
 def run_pxe(test, params, env):
@@ -20,10 +20,8 @@  def run_pxe(test, params, env):
     timeout = int(params.get("pxe_timeout", 60))
 
     logging.info("Try to boot from PXE")
-    status, output = kvm_subprocess.run_fg("tcpdump -nli %s" % vm.get_ifname(),
-                                           logging.debug,
-                                           "(pxe capture) ",
-                                           timeout)
+    output = kvm_subprocess.run_fg("tcpdump -nli %s" % vm.get_ifname(),
+                                   logging.debug, "(pxe capture) ", timeout)[1]
 
     logging.info("Analyzing the tcpdump result...")
     if not "tftp" in output:
diff --git a/client/tests/kvm/tests/qemu_img.py b/client/tests/kvm/tests/qemu_img.py
index b1df0ea..c3449f4 100644
--- a/client/tests/kvm/tests/qemu_img.py
+++ b/client/tests/kvm/tests/qemu_img.py
@@ -1,6 +1,6 @@ 
-import re, os, logging, commands, string
+import re, os, logging, commands
 from autotest_lib.client.common_lib import utils, error
-import kvm_vm, kvm_utils, kvm_test_utils, kvm_preprocessing
+import kvm_vm, kvm_utils, kvm_preprocessing
 
 
 def run_qemu_img(test, params, env):
@@ -162,13 +162,13 @@  def run_qemu_img(test, params, env):
             os.remove(output_filename)
 
 
-    def _info(cmd, img, string=None, fmt=None):
+    def _info(cmd, img, sub_info=None, fmt=None):
         """
         Simple wrapper of 'qemu-img info'.
 
         @param cmd: qemu-img base command.
         @param img: image file
-        @param string: sub info, say 'backing file'
+        @param sub_info: sub info, say 'backing file'
         @param fmt: image format
         """
         cmd += " info"
@@ -182,11 +182,11 @@  def run_qemu_img(test, params, env):
             logging.error("Get info of image '%s' failed: %s", img, str(e))
             return None
 
-        if not string:
+        if not sub_info:
             return output
 
-        string += ": (.*)"
-        matches = re.findall(string, output)
+        sub_info += ": (.*)"
+        matches = re.findall(sub_info, output)
         if matches:
             return matches[0]
         return None
@@ -223,7 +223,7 @@  def run_qemu_img(test, params, env):
             if s != 0:
                 raise error.TestFail("Create snapshot failed via command: %s;"
                                      "Output is: %s" % (crtcmd, o))
-            logging.info("Created snapshot '%s' in '%s'" % (sn_name,image_name))
+            logging.info("Created snapshot '%s' in '%s'", sn_name, image_name)
         listcmd = cmd
         listcmd += " -l %s" % image_name
         s, o = commands.getstatusoutput(listcmd)
@@ -377,7 +377,7 @@  def run_qemu_img(test, params, env):
         if mode == "unsafe":
             cmd += " -u"
         cmd += " -b %s -F %s %s" % (base_img, backing_fmt, img_name)
-        logging.info("Trying to rebase '%s' to '%s'..." % (img_name, base_img))
+        logging.info("Trying to rebase '%s' to '%s'...", img_name, base_img)
         s, o = commands.getstatusoutput(cmd)
         if s != 0:
             raise error.TestError("Failed to rebase '%s' to '%s': %s" %
diff --git a/client/tests/kvm/tests/qmp_basic.py b/client/tests/kvm/tests/qmp_basic.py
index 94ba9ee..9328c61 100644
--- a/client/tests/kvm/tests/qmp_basic.py
+++ b/client/tests/kvm/tests/qmp_basic.py
@@ -1,5 +1,6 @@ 
-import kvm_test_utils, kvm_monitor
 from autotest_lib.client.common_lib import error
+import kvm_test_utils, kvm_monitor
+
 
 def run_qmp_basic(test, params, env):
     """
@@ -197,24 +198,24 @@  def run_qmp_basic(test, params, env):
         Check that QMP's "id" key is correctly handled.
         """
         # The "id" key must be echoed back in error responses
-        id = "kvm-autotest"
-        resp = monitor.cmd_qmp("eject", { "foobar": True }, id=id)
+        id_key = "kvm-autotest"
+        resp = monitor.cmd_qmp("eject", { "foobar": True }, id=id_key)
         check_error_resp(resp)
-        check_str_key(resp, "id", id)
+        check_str_key(resp, "id", id_key)
 
         # The "id" key must be echoed back in success responses
-        resp = monitor.cmd_qmp("query-status", id=id)
+        resp = monitor.cmd_qmp("query-status", id=id_key)
         check_success_resp(resp)
-        check_str_key(resp, "id", id)
+        check_str_key(resp, "id", id_key)
 
         # The "id" key can be any json-object
-        for id in [ True, 1234, "string again!", [1, [], {}, True, "foo"],
+        for id_key in [ True, 1234, "string again!", [1, [], {}, True, "foo"],
                     { "key": {} } ]:
-            resp = monitor.cmd_qmp("query-status", id=id)
+            resp = monitor.cmd_qmp("query-status", id=id_key)
             check_success_resp(resp)
-            if resp["id"] != id:
+            if resp["id"] != id_key:
                 raise error.TestFail("expected id '%s' but got '%s'" %
-                                     (str(id), str(resp["id"])))
+                                     (str(id_key), str(resp["id"])))
 
 
     def test_invalid_arg_key(monitor):
@@ -366,7 +367,8 @@  def run_qmp_basic(test, params, env):
         # is to skip its checking and pass arguments through. Check this
         # works by providing invalid options to device_add and expecting
         # an error message from qdev
-        resp = monitor.cmd_qmp("device_add", { "driver": "e1000","foo": "bar" })
+        resp = monitor.cmd_qmp("device_add", { "driver": "e1000",
+                                              "foo": "bar" })
         check_error_resp(resp, "PropertyNotFound",
                                {"device": "e1000", "property": "foo"})
 
diff --git a/client/tests/kvm/tests/qmp_basic_rhel6.py b/client/tests/kvm/tests/qmp_basic_rhel6.py
index 1bc0006..24298b8 100644
--- a/client/tests/kvm/tests/qmp_basic_rhel6.py
+++ b/client/tests/kvm/tests/qmp_basic_rhel6.py
@@ -1,5 +1,7 @@ 
-import kvm_test_utils, kvm_monitor
+import logging
 from autotest_lib.client.common_lib import error
+import kvm_monitor
+
 
 def run_qmp_basic_rhel6(test, params, env):
     """
@@ -196,24 +198,24 @@  def run_qmp_basic_rhel6(test, params, env):
         Check that QMP's "id" key is correctly handled.
         """
         # The "id" key must be echoed back in error responses
-        id = "kvm-autotest"
-        resp = monitor.cmd_qmp("eject", { "foobar": True }, id=id)
+        id_key = "kvm-autotest"
+        resp = monitor.cmd_qmp("eject", { "foobar": True }, id=id_key)
         check_error_resp(resp)
-        check_str_key(resp, "id", id)
+        check_str_key(resp, "id", id_key)
 
         # The "id" key must be echoed back in success responses
-        resp = monitor.cmd_qmp("query-status", id=id)
+        resp = monitor.cmd_qmp("query-status", id=id_key)
         check_success_resp(resp)
-        check_str_key(resp, "id", id)
+        check_str_key(resp, "id", id_key)
 
         # The "id" key can be any json-object
-        for id in [ True, 1234, "string again!", [1, [], {}, True, "foo"],
+        for id_key in [ True, 1234, "string again!", [1, [], {}, True, "foo"],
                     { "key": {} } ]:
-            resp = monitor.cmd_qmp("query-status", id=id)
+            resp = monitor.cmd_qmp("query-status", id=id_key)
             check_success_resp(resp)
-            if resp["id"] != id:
+            if resp["id"] != id_key:
                 raise error.TestFail("expected id '%s' but got '%s'" %
-                                     (str(id), str(resp["id"])))
+                                     (str(id_key), str(resp["id"])))
 
 
     def test_invalid_arg_key(monitor):
@@ -284,11 +286,11 @@  def run_qmp_basic_rhel6(test, params, env):
         resp = monitor.cmd_obj({ "arguments": {}, "execute": "query-version" })
         check_success_resp(resp)
 
-        id = "1234foo"
-        resp = monitor.cmd_obj({ "id": id, "execute": "query-version",
+        id_key = "1234foo"
+        resp = monitor.cmd_obj({ "id": id_key, "execute": "query-version",
                                  "arguments": {} })
         check_success_resp(resp)
-        check_str_key(resp, "id", id)
+        check_str_key(resp, "id", id_key)
 
         # TODO: would be good to test simple argument usage, but we don't have
         # a read-only command that accepts arguments.
@@ -347,7 +349,8 @@  def run_qmp_basic_rhel6(test, params, env):
         # is to skip its checking and pass arguments through. Check this
         # works by providing invalid options to device_add and expecting
         # an error message from qdev
-        resp = monitor.cmd_qmp("device_add", { "driver": "e1000","foo": "bar" })
+        resp = monitor.cmd_qmp("device_add", {"driver": "e1000",
+                                              "foo": "bar" })
         check_error_resp(resp, "PropertyNotFound",
                                {"device": "e1000", "property": "foo"})
 
diff --git a/client/tests/kvm/tests/set_link.py b/client/tests/kvm/tests/set_link.py
index b31aa77..d73a1b8 100644
--- a/client/tests/kvm/tests/set_link.py
+++ b/client/tests/kvm/tests/set_link.py
@@ -32,7 +32,7 @@  def run_set_link(test, params, env):
     if ratio != 0:
         raise error.TestFail("Loss ratio is %s, output: %s" % (ratio, o))
 
-    logging.info("Executing 'set link %s off'" % linkname)
+    logging.info("Executing 'set link %s off'", linkname)
     vm.monitor.cmd("set_link %s off" % linkname)
     logging.info(vm.monitor.info("network"))
     logging.info("Pinging guest from host")
@@ -45,7 +45,7 @@  def run_set_link(test, params, env):
         raise error.TestFail("Loss ratio is not 100%%,"
                              "Loss ratio is %s" % ratio)
 
-    logging.info("Executing 'set link %s on'" % linkname)
+    logging.info("Executing 'set link %s on'", linkname)
     vm.monitor.cmd("set_link %s on" % linkname)
     logging.info(vm.monitor.info("network"))
     logging.info("Pinging guest from host")
diff --git a/client/tests/kvm/tests/stepmaker.py b/client/tests/kvm/tests/stepmaker.py
index 9f6d9b2..5a9acdc 100755
--- a/client/tests/kvm/tests/stepmaker.py
+++ b/client/tests/kvm/tests/stepmaker.py
@@ -7,10 +7,10 @@  Step file creator/editor.
 @version: "20090401"
 """
 
-import pygtk, gtk, gobject, time, os, commands
+import pygtk, gtk, gobject, time, os, commands, logging
 import common
 from autotest_lib.client.common_lib import error
-import kvm_utils, logging, ppm_utils, stepeditor, kvm_monitor
+import kvm_utils, ppm_utils, stepeditor, kvm_monitor
 pygtk.require('2.0')
 
 
diff --git a/client/tests/kvm/tests/steps.py b/client/tests/kvm/tests/steps.py
index 5d4ed25..91b864d 100644
--- a/client/tests/kvm/tests/steps.py
+++ b/client/tests/kvm/tests/steps.py
@@ -4,9 +4,9 @@  Utilities to perform automatic guest installation using step files.
 @copyright: Red Hat 2008-2009
 """
 
-import os, time, re, shutil, logging
-from autotest_lib.client.common_lib import utils, error
-import kvm_utils, ppm_utils, kvm_subprocess, kvm_monitor
+import os, time, shutil, logging
+from autotest_lib.client.common_lib import error
+import kvm_utils, ppm_utils, kvm_monitor
 try:
     import PIL.Image
 except ImportError:
@@ -97,7 +97,7 @@  def barrier_2(vm, words, params, debug_dir, data_scrdump_filename,
         # Make sure image is valid
         if not ppm_utils.image_verify_ppm_file(scrdump_filename):
             logging.warn("Got invalid screendump: dimensions: %dx%d, "
-                         "data size: %d" % (w, h, len(data)))
+                         "data size: %d", w, h, len(data))
             continue
 
         # Compute md5sum of whole image
@@ -231,7 +231,7 @@  def run_steps(test, params, env):
             vm.send_key(words[1])
         elif words[0] == "var":
             if not handle_var(vm, params, words[1]):
-                logging.error("Variable not defined: %s" % words[1])
+                logging.error("Variable not defined: %s", words[1])
         elif words[0] == "barrier_2":
             if current_screendump:
                 scrdump_filename = os.path.join(
diff --git a/client/tests/kvm/tests/stress_boot.py b/client/tests/kvm/tests/stress_boot.py
index 15ebd20..0c422c0 100644
--- a/client/tests/kvm/tests/stress_boot.py
+++ b/client/tests/kvm/tests/stress_boot.py
@@ -1,6 +1,6 @@ 
-import logging, time
+import logging
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils, kvm_preprocessing
+import kvm_preprocessing
 
 
 @error.context_aware
@@ -39,7 +39,7 @@  def run_stress_boot(test, params, env):
             params["vms"] += " " + vm_name
 
             sessions.append(curr_vm.wait_for_login(timeout=login_timeout))
-            logging.info("Guest #%d booted up successfully" % num)
+            logging.info("Guest #%d booted up successfully", num)
 
             # Check whether all previous shell sessions are responsive
             for i, se in enumerate(sessions):
diff --git a/client/tests/kvm/tests/timedrift.py b/client/tests/kvm/tests/timedrift.py
index 348efb8..9f62b4a 100644
--- a/client/tests/kvm/tests/timedrift.py
+++ b/client/tests/kvm/tests/timedrift.py
@@ -1,6 +1,6 @@ 
-import logging, time, commands, re
+import logging, time, commands
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils
+import kvm_subprocess, kvm_test_utils
 
 
 def run_timedrift(test, params, env):
@@ -122,7 +122,7 @@  def run_timedrift(test, params, env):
                 set_cpu_affinity(pid, cpu_mask)
 
             # Sleep for a while (during load)
-            logging.info("Sleeping for %s seconds..." % load_duration)
+            logging.info("Sleeping for %s seconds...", load_duration)
             time.sleep(load_duration)
 
             # Get time delta after load
@@ -135,9 +135,9 @@  def run_timedrift(test, params, env):
             host_delta = ht1 - ht0
             guest_delta = gt1 - gt0
             drift = 100.0 * (host_delta - guest_delta) / host_delta
-            logging.info("Host duration: %.2f" % host_delta)
-            logging.info("Guest duration: %.2f" % guest_delta)
-            logging.info("Drift: %.2f%%" % drift)
+            logging.info("Host duration: %.2f", host_delta)
+            logging.info("Guest duration: %.2f", guest_delta)
+            logging.info("Drift: %.2f%%", drift)
 
         finally:
             logging.info("Cleaning up...")
@@ -153,7 +153,7 @@  def run_timedrift(test, params, env):
                 load_session.close()
 
         # Sleep again (rest)
-        logging.info("Sleeping for %s seconds..." % rest_duration)
+        logging.info("Sleeping for %s seconds...", rest_duration)
         time.sleep(rest_duration)
 
         # Get time after rest
@@ -169,9 +169,9 @@  def run_timedrift(test, params, env):
     host_delta_total = ht2 - ht0
     guest_delta_total = gt2 - gt0
     drift_total = 100.0 * (host_delta_total - guest_delta_total) / host_delta
-    logging.info("Total host duration including rest: %.2f" % host_delta_total)
-    logging.info("Total guest duration including rest: %.2f" % guest_delta_total)
-    logging.info("Total drift after rest: %.2f%%" % drift_total)
+    logging.info("Total host duration including rest: %.2f", host_delta_total)
+    logging.info("Total guest duration including rest: %.2f", guest_delta_total)
+    logging.info("Total drift after rest: %.2f%%", drift_total)
 
     # Fail the test if necessary
     if abs(drift) > drift_threshold:
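
For reference, the drift figures logged above are simply the host/guest elapsed-time difference expressed as a percentage of host elapsed time. A worked example with made-up numbers (the sketch normalises each figure by its own host delta):

    ht0, ht1 = 100.0, 160.0      # host time before/after load, seconds
    gt0, gt1 = 100.0, 157.0      # guest time before/after load, seconds

    host_delta = ht1 - ht0       # 60.0 s elapsed on the host
    guest_delta = gt1 - gt0      # 57.0 s elapsed inside the guest
    drift = 100.0 * (host_delta - guest_delta) / host_delta
    print "Drift: %.2f%%" % drift        # 5.00%, i.e. the guest ran ~5% slow
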
diff --git a/client/tests/kvm/tests/timedrift_with_migration.py b/client/tests/kvm/tests/timedrift_with_migration.py
index 66e8fde..b1d4f3e 100644
--- a/client/tests/kvm/tests/timedrift_with_migration.py
+++ b/client/tests/kvm/tests/timedrift_with_migration.py
@@ -1,6 +1,6 @@ 
-import logging, time, commands, re
+import logging
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils
+import kvm_test_utils
 
 
 def run_timedrift_with_migration(test, params, env):
@@ -46,8 +46,8 @@  def run_timedrift_with_migration(test, params, env):
                                                    time_filter_re, time_format)
             session.close()
             # Run current iteration
-            logging.info("Migrating: iteration %d of %d..." %
-                         (i + 1, migration_iterations))
+            logging.info("Migrating: iteration %d of %d...",
+                         (i + 1), migration_iterations)
             vm.migrate()
             # Log in
             logging.info("Logging in after migration...")
@@ -60,12 +60,12 @@  def run_timedrift_with_migration(test, params, env):
             host_delta = ht1_ - ht0_
             guest_delta = gt1_ - gt0_
             drift = abs(host_delta - guest_delta)
-            logging.info("Host duration (iteration %d): %.2f" %
-                         (i + 1, host_delta))
-            logging.info("Guest duration (iteration %d): %.2f" %
-                         (i + 1, guest_delta))
-            logging.info("Drift at iteration %d: %.2f seconds" %
-                         (i + 1, drift))
+            logging.info("Host duration (iteration %d): %.2f",
+                         (i + 1), host_delta)
+            logging.info("Guest duration (iteration %d): %.2f",
+                         (i + 1), guest_delta)
+            logging.info("Drift at iteration %d: %.2f seconds",
+                         (i + 1), drift)
             # Fail if necessary
             if drift > drift_threshold_single:
                 raise error.TestFail("Time drift too large at iteration %d: "
@@ -83,12 +83,12 @@  def run_timedrift_with_migration(test, params, env):
     host_delta = ht1 - ht0
     guest_delta = gt1 - gt0
     drift = abs(host_delta - guest_delta)
-    logging.info("Host duration (%d migrations): %.2f" %
-                 (migration_iterations, host_delta))
-    logging.info("Guest duration (%d migrations): %.2f" %
-                 (migration_iterations, guest_delta))
-    logging.info("Drift after %d migrations: %.2f seconds" %
-                 (migration_iterations, drift))
+    logging.info("Host duration (%d migrations): %.2f",
+                 migration_iterations, host_delta)
+    logging.info("Guest duration (%d migrations): %.2f",
+                 migration_iterations, guest_delta)
+    logging.info("Drift after %d migrations: %.2f seconds",
+                 migration_iterations, drift)
 
     # Fail if necessary
     if drift > drift_threshold:
diff --git a/client/tests/kvm/tests/timedrift_with_reboot.py b/client/tests/kvm/tests/timedrift_with_reboot.py
index a1ab5f3..05ef21f 100644
--- a/client/tests/kvm/tests/timedrift_with_reboot.py
+++ b/client/tests/kvm/tests/timedrift_with_reboot.py
@@ -1,6 +1,6 @@ 
-import logging, time, commands, re
+import logging
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils
+import kvm_test_utils
 
 
 def run_timedrift_with_reboot(test, params, env):
@@ -45,8 +45,8 @@  def run_timedrift_with_reboot(test, params, env):
             (ht0_, gt0_) = kvm_test_utils.get_time(session, time_command,
                                                    time_filter_re, time_format)
             # Run current iteration
-            logging.info("Rebooting: iteration %d of %d..." %
-                         (i + 1, reboot_iterations))
+            logging.info("Rebooting: iteration %d of %d...",
+                         (i + 1), reboot_iterations)
             session = vm.reboot(session)
             # Get time after current iteration
             (ht1_, gt1_) = kvm_test_utils.get_time(session, time_command,
@@ -55,12 +55,12 @@  def run_timedrift_with_reboot(test, params, env):
             host_delta = ht1_ - ht0_
             guest_delta = gt1_ - gt0_
             drift = abs(host_delta - guest_delta)
-            logging.info("Host duration (iteration %d): %.2f" %
-                         (i + 1, host_delta))
-            logging.info("Guest duration (iteration %d): %.2f" %
-                         (i + 1, guest_delta))
-            logging.info("Drift at iteration %d: %.2f seconds" %
-                         (i + 1, drift))
+            logging.info("Host duration (iteration %d): %.2f",
+                         (i + 1), host_delta)
+            logging.info("Guest duration (iteration %d): %.2f",
+                         (i + 1), guest_delta)
+            logging.info("Drift at iteration %d: %.2f seconds",
+                         (i + 1), drift)
             # Fail if necessary
             if drift > drift_threshold_single:
                 raise error.TestFail("Time drift too large at iteration %d: "
@@ -78,12 +78,12 @@  def run_timedrift_with_reboot(test, params, env):
     host_delta = ht1 - ht0
     guest_delta = gt1 - gt0
     drift = abs(host_delta - guest_delta)
-    logging.info("Host duration (%d reboots): %.2f" %
-                 (reboot_iterations, host_delta))
-    logging.info("Guest duration (%d reboots): %.2f" %
-                 (reboot_iterations, guest_delta))
-    logging.info("Drift after %d reboots: %.2f seconds" %
-                 (reboot_iterations, drift))
+    logging.info("Host duration (%d reboots): %.2f",
+                 reboot_iterations, host_delta)
+    logging.info("Guest duration (%d reboots): %.2f",
+                 reboot_iterations, guest_delta)
+    logging.info("Drift after %d reboots: %.2f seconds",
+                 reboot_iterations, drift)
 
     # Fail if necessary
     if drift > drift_threshold:
diff --git a/client/tests/kvm/tests/timedrift_with_stop.py b/client/tests/kvm/tests/timedrift_with_stop.py
index cf396cb..9f51ff9 100644
--- a/client/tests/kvm/tests/timedrift_with_stop.py
+++ b/client/tests/kvm/tests/timedrift_with_stop.py
@@ -1,6 +1,6 @@ 
-import logging, time, commands, re
+import logging, time
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils
+import kvm_test_utils
 
 
 def run_timedrift_with_stop(test, params, env):
@@ -49,8 +49,8 @@  def run_timedrift_with_stop(test, params, env):
             (ht0_, gt0_) = kvm_test_utils.get_time(session, time_command,
                                                    time_filter_re, time_format)
             # Run current iteration
-            logging.info("Stop %s second: iteration %d of %d..." %
-                         (stop_time, i + 1, stop_iterations))
+            logging.info("Stop %s second: iteration %d of %d...",
+                         stop_time, (i + 1), stop_iterations)
 
             vm.monitor.cmd("stop")
             time.sleep(stop_time)
@@ -67,12 +67,12 @@  def run_timedrift_with_stop(test, params, env):
             host_delta = ht1_ - ht0_
             guest_delta = gt1_ - gt0_
             drift = abs(host_delta - guest_delta)
-            logging.info("Host duration (iteration %d): %.2f" %
-                         (i + 1, host_delta))
-            logging.info("Guest duration (iteration %d): %.2f" %
-                         (i + 1, guest_delta))
-            logging.info("Drift at iteration %d: %.2f seconds" %
-                         (i + 1, drift))
+            logging.info("Host duration (iteration %d): %.2f",
+                         (i + 1), host_delta)
+            logging.info("Guest duration (iteration %d): %.2f",
+                         (i + 1), guest_delta)
+            logging.info("Drift at iteration %d: %.2f seconds",
+                         (i + 1), drift)
             # Fail if necessary
             if drift > drift_threshold_single:
                 raise error.TestFail("Time drift too large at iteration %d: "
@@ -90,12 +90,12 @@  def run_timedrift_with_stop(test, params, env):
     host_delta = ht1 - ht0
     guest_delta = gt1 - gt0
     drift = abs(host_delta - guest_delta)
-    logging.info("Host duration (%d stops): %.2f" %
-                 (stop_iterations, host_delta))
-    logging.info("Guest duration (%d stops): %.2f" %
-                 (stop_iterations, guest_delta))
-    logging.info("Drift after %d stops: %.2f seconds" %
-                 (stop_iterations, drift))
+    logging.info("Host duration (%d stops): %.2f",
+                 stop_iterations, host_delta)
+    logging.info("Guest duration (%d stops): %.2f",
+                 stop_iterations, guest_delta)
+    logging.info("Drift after %d stops: %.2f seconds",
+                 stop_iterations, drift)
 
     # Fail if necessary
     if drift > drift_threshold:
diff --git a/client/tests/kvm/tests/unattended_install.py b/client/tests/kvm/tests/unattended_install.py
index 7658839..7c6d845 100644
--- a/client/tests/kvm/tests/unattended_install.py
+++ b/client/tests/kvm/tests/unattended_install.py
@@ -1,6 +1,6 @@ 
 import logging, time, socket, re
 from autotest_lib.client.common_lib import error
-import kvm_utils, kvm_test_utils, kvm_vm
+import kvm_vm
 
 
 @error.context_aware
diff --git a/client/tests/kvm/tests/unittest.py b/client/tests/kvm/tests/unittest.py
index c724051..9a126a5 100644
--- a/client/tests/kvm/tests/unittest.py
+++ b/client/tests/kvm/tests/unittest.py
@@ -1,6 +1,6 @@ 
-import logging, time, os, shutil, glob, ConfigParser
+import logging, os, shutil, glob, ConfigParser
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils, kvm_preprocessing
+import kvm_utils, kvm_preprocessing
 
 
 def run_unittest(test, params, env):
@@ -34,12 +34,12 @@  def run_unittest(test, params, env):
     if not test_list:
         raise error.TestError("No tests listed on config file %s" %
                               unittest_cfg)
-    logging.debug('Unit test list: %s' % test_list)
+    logging.debug('Unit test list: %s', test_list)
 
     if params.get('test_list'):
         test_list = params.get('test_list').split()
         logging.info('Original test list overriden by user')
-        logging.info('User defined unit test list: %s' % test_list)
+        logging.info('User defined unit test list: %s', test_list)
 
     nfail = 0
     tests_failed = []
@@ -51,23 +51,23 @@  def run_unittest(test, params, env):
     for t in test_list:
         logging.info('Running %s', t)
 
-        file = None
+        flat_file = None
         if parser.has_option(t, 'file'):
-            file = parser.get(t, 'file')
+            flat_file = parser.get(t, 'file')
 
-        if file is None:
+        if flat_file is None:
             nfail += 1
             tests_failed.append(t)
             logging.error('Unittest config file %s has section %s but no '
-                          'mandatory option file' % (unittest_cfg, t))
+                          'mandatory "file" option', unittest_cfg, t)
             continue
 
-        if file not in unittest_list:
+        if flat_file not in unittest_list:
             nfail += 1
             tests_failed.append(t)
             logging.error('Unittest file %s referenced in config file %s but '
-                          'was not find under the unittest dir' %
-                          (file, unittest_cfg))
+                          'was not found under the unittest dir', flat_file,
+                          unittest_cfg)
             continue
 
         smp = None
@@ -81,7 +81,7 @@  def run_unittest(test, params, env):
             params['extra_params'] += ' %s' % extra_params
 
         vm_name = params.get("main_vm")
-        params['kernel'] = os.path.join(unittest_dir, file)
+        params['kernel'] = os.path.join(unittest_dir, flat_file)
         testlog_path = os.path.join(test.debugdir, "%s.log" % t)
 
         try:
@@ -111,7 +111,7 @@  def run_unittest(test, params, env):
                 shutil.copy(vm.get_testlog_filename(), testlog_path)
                 logging.info("Unit test log collected and available under %s",
                              testlog_path)
-            except NameError, IOError:
+            except (NameError, IOError):
                 logging.error("Not possible to collect logs")
 
         # Restore the extra params so other tests can run normally
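
The except (NameError, IOError): fix above is more than tidiness: in Python 2, "except NameError, IOError:" means "catch NameError and bind the caught instance to the name IOError", so an IOError from shutil.copy() was never caught on that path. A sketch of the two behaviours:

    def old_form(path):
        try:
            open(path)
        except NameError, IOError:       # catches NameError only and, on a
            return "caught"              # match, rebinds the *name* IOError
        # an IOError raised by open() propagates out of old_form()

    def fixed_form(path):
        try:
            open(path)
        except (NameError, IOError):     # tuple form catches either exception
            return "caught"

    # old_form("/nonexistent/testlog")   -> raises IOError
    # fixed_form("/nonexistent/testlog") -> returns "caught"
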
diff --git a/client/tests/kvm/tests/unittest_kvmctl.py b/client/tests/kvm/tests/unittest_kvmctl.py
index 4afd862..dd72cb2 100644
--- a/client/tests/kvm/tests/unittest_kvmctl.py
+++ b/client/tests/kvm/tests/unittest_kvmctl.py
@@ -21,7 +21,7 @@  def run_unittest_kvmctl(test, params, env):
     cmd = "./kvmctl test/x86/bootstrap test/x86/%s.flat" % case
     try:
         results = utils.system_output(cmd)
-    except error.CmdError, e:
+    except error.CmdError:
         raise error.TestFail("Unit test %s failed" % case)
 
     result_file = os.path.join(test.resultsdir, case)
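
A side note on the unittest_kvmctl hunk: dropping the unused ", e" binding silences the checker, though an alternative (a suggestion only, not what this patch does) would be to keep the binding and fold the command error into the failure message:

    try:
        results = utils.system_output(cmd)
    except error.CmdError, e:
        raise error.TestFail("Unit test %s failed: %s" % (case, e))
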
diff --git a/client/tests/kvm/tests/virtio_console.py b/client/tests/kvm/tests/virtio_console.py
index b1e8bc6..af32bf2 100644
--- a/client/tests/kvm/tests/virtio_console.py
+++ b/client/tests/kvm/tests/virtio_console.py
@@ -8,9 +8,9 @@  import threading, time, traceback
 from collections import deque
 from threading import Thread
 
-import kvm_subprocess, kvm_test_utils, kvm_utils, kvm_preprocessing
 from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
+import kvm_subprocess, kvm_test_utils, kvm_preprocessing
 
 
 def run_virtio_console(test, params, env):
@@ -73,7 +73,7 @@  def run_virtio_console(test, params, env):
                 args = []
             res = [None, function.func_name, args]
             try:
-                logging.debug("Start test %s." % function.func_name)
+                logging.debug("Start test %s.", function.func_name)
                 ret = function(*args)
                 res[0] = True
                 logging.info(self.result_to_string(res))
@@ -371,16 +371,16 @@  def run_virtio_console(test, params, env):
         """
         Random data receiver/checker thread.
         """
-        def __init__(self, port, buffer, event, blocklen=1024):
+        def __init__(self, port, buf, event, blocklen=1024):
             """
             @param port: Source port.
-            @param buffer: Control data buffer (FIFO).
+            @param buf: Control data buffer (FIFO).
             @param length: Amount of data we want to receive.
             @param blocklen: Block length.
             """
             Thread.__init__(self)
             self.port = port
-            self.buffer = buffer
+            self.buffer = buf
             self.exitevent = event
             self.blocklen = blocklen
             self.idx = 0
@@ -466,10 +466,10 @@  def run_virtio_console(test, params, env):
 
     def init_guest(vm, consoles):
         """
-        Prepares guest, executes virtio_console_guest.py and initialize for testing
+        Prepares guest, executes virtio_console_guest.py and initializes test.
 
         @param vm: Informations about the guest.
-        @param consoles: Informations about consoles
+        @param consoles: Informations about consoles.
         """
         conss = []
         for mode in consoles:
@@ -934,7 +934,7 @@  def run_virtio_console(test, params, env):
         for param in params.split(';'):
             if not param:
                 continue
-            logging.info("test_loopback: params: %s" % (param))
+            logging.info("test_loopback: params: %s", param)
             param = param.split(':')
             idx_serialport = 0
             idx_console = 0
@@ -1039,7 +1039,7 @@  def run_virtio_console(test, params, env):
         for param in params.split(';'):
             if not param:
                 continue
-            logging.info("test_perf: params: %s" % (param))
+            logging.info("test_perf: params: %s", param)
             param = param.split(':')
             duration = 60.0
             if len(param) > 1:
@@ -1063,7 +1063,7 @@  def run_virtio_console(test, params, env):
                 data += "%c" % random.randrange(255)
 
             exit_event = threading.Event()
-            slice = float(duration) / 100
+            time_slice = float(duration) / 100
 
             # HOST -> GUEST
             on_guest('virt.loopback(["%s"], [], %d, virt.LOOP_NONE)' %
@@ -1077,7 +1077,7 @@  def run_virtio_console(test, params, env):
             thread.start()
             for i in range(100):
                 stats.append(thread.idx)
-                time.sleep(slice)
+                time.sleep(time_slice)
             _time = time.time() - _time - duration
             logging.info("\n" + loads.get_cpu_status_string()[:-1])
             logging.info("\n" + loads.get_mem_status_string()[:-1])
@@ -1091,12 +1091,12 @@  def run_virtio_console(test, params, env):
 
             _guest_exit_threads(vm, [port], [])
 
-            if (_time > slice):
+            if (_time > time_slice):
                 logging.error(
-                "Test ran %fs longer which is more than one slice", _time)
+                "Test ran %fs longer which is more than one time slice", _time)
             else:
                 logging.debug("Test ran %fs longer", _time)
-            stats = process_stats(stats[1:], slice * 1048576)
+            stats = process_stats(stats[1:], time_slice * 1048576)
             logging.debug("Stats = %s", stats)
             logging.info("Host -> Guest [MB/s] (min/med/max) = %.3f/%.3f/%.3f",
                         stats[0], stats[len(stats) / 2], stats[-1])
@@ -1115,19 +1115,19 @@  def run_virtio_console(test, params, env):
             _time = time.time()
             for i in range(100):
                 stats.append(thread.idx)
-                time.sleep(slice)
+                time.sleep(time_slice)
             _time = time.time() - _time - duration
             logging.info("\n" + loads.get_cpu_status_string()[:-1])
             logging.info("\n" + loads.get_mem_status_string()[:-1])
             on_guest("virt.exit_threads()", vm, 10)
             exit_event.set()
             thread.join()
-            if (_time > slice): # Deviation is higher than 1 slice
+            if (_time > time_slice): # Deviation is higher than 1 time_slice
                 logging.error(
-                "Test ran %fs longer which is more than one slice", _time)
+                "Test ran %fs longer which is more than one time slice", _time)
             else:
-                logging.debug("Test ran %fs longer" % _time)
-            stats = process_stats(stats[1:], slice * 1048576)
+                logging.debug("Test ran %fs longer", _time)
+            stats = process_stats(stats[1:], time_slice * 1048576)
             logging.debug("Stats = %s", stats)
             logging.info("Guest -> Host [MB/s] (min/med/max) = %.3f/%.3f/%.3f",
                          stats[0], stats[len(stats) / 2], stats[-1])
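
In the perf hunks, thread.idx (a cumulative byte counter) is sampled once per time_slice for 100 slices, and the samples go through process_stats(stats[1:], time_slice * 1048576) before the "[MB/s] (min/med/max)" line is logged. process_stats itself is not part of this patch; the sketch below is a guess at its shape (difference consecutive counters, scale, sort) and is labelled as such:

    def process_stats_sketch(samples, scale):
        """Hypothetical stand-in for process_stats(): turn cumulative byte
        counters sampled once per time slice into sorted per-slice rates."""
        rates = []
        for i in range(1, len(samples)):
            rates.append((samples[i] - samples[i - 1]) / scale)
        rates.sort()
        return rates   # rates[0] = min, rates[len(rates) / 2] = median, rates[-1] = max

    # With time_slice in seconds, scale = time_slice * 1048576 turns
    # bytes-per-slice into MB/s, which matches the logged units.
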
diff --git a/client/tests/kvm/tests/vlan.py b/client/tests/kvm/tests/vlan.py
index 19a9250..b7cfda2 100644
--- a/client/tests/kvm/tests/vlan.py
+++ b/client/tests/kvm/tests/vlan.py
@@ -2,6 +2,7 @@  import logging, time, re
 from autotest_lib.client.common_lib import error
 import kvm_test_utils, kvm_utils, kvm_subprocess
 
+
 def run_vlan(test, params, env):
     """
     Test 802.1Q vlan of NIC, config it by vconfig command.
@@ -35,20 +36,20 @@  def run_vlan(test, params, env):
     for vm_ in vm:
         vm_.verify_alive()
 
-    def add_vlan(session, id, iface="eth0"):
-        session.cmd("vconfig add %s %s" % (iface, id))
+    def add_vlan(session, v_id, iface="eth0"):
+        session.cmd("vconfig add %s %s" % (iface, v_id))
 
-    def set_ip_vlan(session, id, ip, iface="eth0"):
-        iface = "%s.%s" % (iface, id)
+    def set_ip_vlan(session, v_id, ip, iface="eth0"):
+        iface = "%s.%s" % (iface, v_id)
         session.cmd("ifconfig %s %s" % (iface, ip))
 
     def set_arp_ignore(session, iface="eth0"):
         ignore_cmd = "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore"
         session.cmd(ignore_cmd)
 
-    def rem_vlan(session, id, iface="eth0"):
+    def rem_vlan(session, v_id, iface="eth0"):
         rem_vlan_cmd = "if [[ -e /proc/net/vlan/%s ]];then vconfig rem %s;fi"
-        iface = "%s.%s" % (iface, id)
+        iface = "%s.%s" % (iface, v_id)
         return session.cmd_status(rem_vlan_cmd % (iface, iface))
 
     def nc_transfer(src, dst):
@@ -72,10 +73,10 @@  def run_vlan(test, params, env):
         output = session[dst].cmd_output("md5sum receive").strip()
         digest_receive = re.findall(r'(\w+)', output)[0]
         if digest_receive == digest_origin[src]:
-            logging.info("file succeed received in vm %s" % vlan_ip[dst])
+            logging.info("file successfully received in vm %s", vlan_ip[dst])
         else:
-            logging.info("digest_origin is  %s" % digest_origin[src])
-            logging.info("digest_receive is %s" % digest_receive)
+            logging.info("digest_origin is  %s", digest_origin[src])
+            logging.info("digest_receive is %s", digest_receive)
             raise error.TestFail("File transfered differ from origin")
         session[dst].cmd_output("rm -f receive")
 
@@ -113,7 +114,7 @@  def run_vlan(test, params, env):
             set_arp_ignore(session[i], ifname[i])
 
         for vlan in range(1, vlan_num+1):
-            logging.info("Test for vlan %s" % vlan)
+            logging.info("Test for vlan %s", vlan)
 
             logging.info("Ping between vlans")
             interface = ifname[0] + '.' + str(vlan)
@@ -142,8 +143,8 @@  def run_vlan(test, params, env):
                                    session=session_flood, timeout=10)
                 session_flood.close()
 
-            flood_ping(0,1)
-            flood_ping(1,0)
+            flood_ping(0, 1)
+            flood_ping(1, 0)
 
             logging.info("Transfering data through nc")
             nc_transfer(0, 1)
@@ -153,7 +154,7 @@  def run_vlan(test, params, env):
         for vlan in range(1, vlan_num+1):
             rem_vlan(session[0], vlan, ifname[0])
             rem_vlan(session[1], vlan, ifname[1])
-            logging.info("rem vlan: %s" % vlan)
+            logging.info("rem vlan: %s", vlan)
 
     # Plumb/unplumb maximal unber of vlan interfaces
     i = 1
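
For orientation, the renamed vlan helpers simply wrap vconfig/ifconfig run inside the guest shells; one pass of the test boils down to something like the following (session objects, interface names and addresses are placeholders, not values from the test configuration):

    vlan = 5
    add_vlan(session[0], vlan, "eth0")                     # vconfig add eth0 5
    set_ip_vlan(session[0], vlan, "192.168.5.11", "eth0")  # ifconfig eth0.5 192.168.5.11
    set_arp_ignore(session[0], "eth0")
    # ... ping and nc_transfer between the two guests ...
    rem_vlan(session[0], vlan, "eth0")                     # vconfig rem eth0.5, if present
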
diff --git a/client/tests/kvm/tests/vmstop.py b/client/tests/kvm/tests/vmstop.py
index 1dd6dcf..74ecb23 100644
--- a/client/tests/kvm/tests/vmstop.py
+++ b/client/tests/kvm/tests/vmstop.py
@@ -1,7 +1,7 @@ 
 import logging, time, os
 from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
-import kvm_subprocess, kvm_test_utils, kvm_utils
+import kvm_utils
 
 
 def run_vmstop(test, params, env):
diff --git a/client/tests/kvm/tests/whql_client_install.py b/client/tests/kvm/tests/whql_client_install.py
index 1459793..f5d725d 100644
--- a/client/tests/kvm/tests/whql_client_install.py
+++ b/client/tests/kvm/tests/whql_client_install.py
@@ -1,6 +1,6 @@ 
-import logging, time, os, re
+import logging, time, os
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils, rss_file_transfer
+import kvm_test_utils, kvm_utils, rss_file_transfer
 
 
 def run_whql_client_install(test, params, env):
@@ -82,19 +82,19 @@  def run_whql_client_install(test, params, env):
 
     # Rename the client machine
     client_name = "autotest_%s" % kvm_utils.generate_random_string(4)
-    logging.info("Renaming client machine to '%s'" % client_name)
+    logging.info("Renaming client machine to '%s'", client_name)
     cmd = ('wmic computersystem where name="%%computername%%" rename name="%s"'
            % client_name)
     session.cmd(cmd, timeout=600)
 
     # Join the server's workgroup
-    logging.info("Joining workgroup '%s'" % server_workgroup)
+    logging.info("Joining workgroup '%s'", server_workgroup)
     cmd = ('wmic computersystem where name="%%computername%%" call '
            'joindomainorworkgroup name="%s"' % server_workgroup)
     session.cmd(cmd, timeout=600)
 
     # Set the client machine's DNS suffix
-    logging.info("Setting DNS suffix to '%s'" % server_dns_suffix)
+    logging.info("Setting DNS suffix to '%s'", server_dns_suffix)
     cmd = 'reg add %s /v Domain /d "%s" /f' % (regkey, server_dns_suffix)
     session.cmd(cmd, timeout=300)
 
diff --git a/client/tests/kvm/tests/whql_submission.py b/client/tests/kvm/tests/whql_submission.py
index 6a9407f..c3621c4 100644
--- a/client/tests/kvm/tests/whql_submission.py
+++ b/client/tests/kvm/tests/whql_submission.py
@@ -1,6 +1,6 @@ 
-import logging, time, os, re
+import logging, os, re
 from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils, rss_file_transfer
+import kvm_subprocess, kvm_utils, rss_file_transfer
 
 
 def run_whql_submission(test, params, env):
@@ -249,7 +249,7 @@  def run_whql_submission(test, params, env):
                                          r["pass"], r["fail"], r["notrun"],
                                          r["notapplicable"]))
         f.close()
-        logging.info("(see logs and HTML reports in %s)" % test.debugdir)
+        logging.info("(see logs and HTML reports in %s)", test.debugdir)
 
     # Kill the client VMs and fail if the automation program did not terminate
     # on time
diff --git a/client/tests/kvm/tests/yum_update.py b/client/tests/kvm/tests/yum_update.py
index 849a67a..7c9b96c 100644
--- a/client/tests/kvm/tests/yum_update.py
+++ b/client/tests/kvm/tests/yum_update.py
@@ -1,6 +1,4 @@ 
 import logging, time
-from autotest_lib.client.common_lib import error
-import kvm_subprocess, kvm_test_utils, kvm_utils
 
 
 def internal_yum_update(session, command, prompt, timeout):
@@ -16,8 +14,9 @@  def internal_yum_update(session, command, prompt, timeout):
     session.sendline(command)
     end_time = time.time() + timeout
     while time.time() < end_time:
-        (match, text) = session.read_until_last_line_matches(
-                        ["[Ii]s this [Oo][Kk]", prompt], timeout=timeout)
+        match = session.read_until_last_line_matches(
+                                                ["[Ii]s this [Oo][Kk]", prompt],
+                                                timeout=timeout)[0]
         if match == 0:
             logging.info("Got 'Is this ok'; sending 'y'")
             session.sendline("y")