
[4/4] KVM test: Modify kvm_utils.run_tests to include non fatal failures

Message ID 1302275250-6215-5-git-send-email-lmr@redhat.com (mailing list archive)
State New, archived

Commit Message

Lucas Meneghel Rodrigues April 8, 2011, 3:07 p.m. UTC
So now, if an error.TestWarn is raised, dependent tests can still run
just fine.

Signed-off-by: Lucas Meneghel Rodrigues <lmr@redhat.com>
---
 client/tests/kvm/kvm_utils.py |   15 ++++++++++-----
 1 files changed, 10 insertions(+), 5 deletions(-)

Patch

diff --git a/client/tests/kvm/kvm_utils.py b/client/tests/kvm/kvm_utils.py
index ff9ee17..8b908ff 100644
--- a/client/tests/kvm/kvm_utils.py
+++ b/client/tests/kvm/kvm_utils.py
@@ -1150,7 +1150,10 @@  def run_tests(parser, job):
             for test_name in status_dict.keys():
                 if not dep in test_name:
                     continue
-                if not status_dict[test_name]:
+                # So the only really non-fatal state is WARN,
+                # All the others make it not safe to proceed with dependency
+                # execution
+                if status_dict[test_name] not in ['GOOD', 'WARN']:
                     dependencies_satisfied = False
                     break
         if dependencies_satisfied:
@@ -1163,8 +1166,9 @@  def run_tests(parser, job):
 
             # We need only one execution, profiled, hence we're passing
             # the profile_only parameter to job.run_test().
-            current_status = job.run_test("kvm", params=dict, tag=test_tag,
-                                          iterations=test_iterations,
+            current_status = job.run_test_detail("kvm", params=dict,
+                                                 tag=test_tag,
+                                                 iterations=test_iterations,
                                           profile_only= bool(profilers) or None)
 
             for profiler in profilers:
@@ -1175,8 +1179,9 @@  def run_tests(parser, job):
         else:
             # We will force the test to fail as TestNA during preprocessing
             dict['dependency_failed'] = 'yes'
-            current_status = job.run_test("kvm", params=dict, tag=test_tag,
-                                          iterations=test_iterations,
+            current_status = job.run_test_detail("kvm", params=dict,
+                                                 tag=test_tag,
+                                                 iterations=test_iterations,
                                           profile_only= bool(profilers) or None)
         status_dict[dict.get("name")] = current_status
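
Not part of the patch, just a minimal, self-contained sketch of the logic it
changes, assuming the status strings are plain values like 'GOOD', 'WARN' and
'FAIL' as compared in the hunk above. run_fake_test() is a made-up stand-in
for the real job.run_test_detail() call, which the patch relies on to return
such a status string:

    def run_fake_test(name, outcome):
        """Pretend to run a test and return its status string."""
        return outcome


    def dependencies_satisfied(deps, status_dict):
        """True if every dependency finished in a non-fatal state."""
        for dep in deps:
            for test_name, status in status_dict.items():
                if dep not in test_name:
                    continue
                # WARN is the only truly non-fatal failure state; anything
                # else (FAIL, ERROR, TEST_NA, ...) makes it unsafe to run
                # the dependent test.
                if status not in ('GOOD', 'WARN'):
                    return False
        return True


    if __name__ == '__main__':
        status_dict = {}
        status_dict['boot'] = run_fake_test('boot', 'WARN')
        status_dict['shutdown'] = run_fake_test('shutdown', 'FAIL')

        # 'boot' only warned, so tests depending on it may still run.
        print(dependencies_satisfied(['boot'], status_dict))        # True
        # 'shutdown' failed, so its dependents are skipped as TestNA.
        print(dependencies_satisfied(['shutdown'], status_dict))    # False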