[v3,4/6] KVM: Optimize vcpu->requests checking

Message ID 1341853545-3023-5-git-send-email-avi@redhat.com (mailing list archive)
State New, archived

Commit Message

Avi Kivity, July 9, 2012, 5:05 p.m. UTC
Instead of checking each request linearly, use for_each_set_bit() to
iterate over just the requests that are set (there should be 0 or 1 of
them set most of the time).
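
For reference, for_each_set_bit() (from <linux/bitops.h>) is built on
find_first_bit()/find_next_bit(), so the loop body runs once per set
bit rather than once per possible request; the kernel's definition is
roughly:

	#define for_each_set_bit(bit, addr, size) \
		for ((bit) = find_first_bit((addr), (size)); \
		     (bit) < (size); \
		     (bit) = find_next_bit((addr), (size), (bit) + 1))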

To avoid a useless call to find_first_bit(), add an early check for
the case where no requests are set at all.  To avoid an extra
indentation level (which would make the patch unreviewable), I added a
rather ugly goto.  This can be cleaned up in a later patch.
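
With the individual request handlers elided, the resulting control
flow looks like this (sketch only; the complete version is in the
patch below):

	if (!vcpu->requests)
		goto no_requests;

	for_each_set_bit(req, &vcpu->requests, BITS_PER_LONG) {
		clear_bit(req, &vcpu->requests);
		switch (req) {
		...
		}
	}

	no_requests:
	preempt_disable();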

Signed-off-by: Avi Kivity <avi@redhat.com>
---
 arch/x86/kvm/x86.c | 62 ++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 39 insertions(+), 23 deletions(-)

Patch

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 162231f..9296dce 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5217,6 +5217,7 @@  static void process_nmi(struct kvm_vcpu *vcpu)
 
 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 {
+	unsigned req;
 	int r;
 	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
 		vcpu->run->request_interrupt_window;
@@ -5225,57 +5226,67 @@  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (unlikely(req_int_win))
 		kvm_make_request(KVM_REQ_EVENT, vcpu);
 
-	if (vcpu->requests) {
-		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
+	if (!vcpu->requests)
+		goto no_requests;
+
+	for_each_set_bit(req, &vcpu->requests, BITS_PER_LONG) {
+		clear_bit(req, &vcpu->requests);
+		switch (req) {
+		case KVM_REQ_MMU_RELOAD:
 			kvm_mmu_unload(vcpu);
 			r = kvm_mmu_reload(vcpu);
 			if (unlikely(r)) {
 				kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
 				goto out;
 			}
-		}
-		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
+			break;
+		case KVM_REQ_MIGRATE_TIMER:
 			__kvm_migrate_timers(vcpu);
-		if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
+			break;
+		case KVM_REQ_CLOCK_UPDATE:
 			r = kvm_guest_time_update(vcpu);
 			if (unlikely(r))
 				goto out;
-		}
-		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
+			break;
+		case KVM_REQ_MMU_SYNC:
 			kvm_mmu_sync_roots(vcpu);
-		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
+			break;
+		case KVM_REQ_TLB_FLUSH:
 			kvm_x86_ops->tlb_flush(vcpu);
-		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
+			break;
+		case KVM_REQ_REPORT_TPR_ACCESS:
 			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
 			r = 0;
 			goto out;
-		}
-		if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
+		case KVM_REQ_TRIPLE_FAULT:
 			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
 			r = 0;
 			goto out;
-		}
-		if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
+		case KVM_REQ_DEACTIVATE_FPU:
 			vcpu->fpu_active = 0;
 			kvm_x86_ops->fpu_deactivate(vcpu);
-		}
-		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
+			break;
+		case KVM_REQ_APF_HALT:
 			/* Page is swapped out. Do synthetic halt */
 			vcpu->arch.apf.halted = true;
 			r = 1;
 			goto out;
-		}
-		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
+		case KVM_REQ_STEAL_UPDATE:
 			record_steal_time(vcpu);
-		if (kvm_check_request(KVM_REQ_NMI, vcpu))
+			break;
+		case KVM_REQ_NMI:
 			process_nmi(vcpu);
-		req_immediate_exit =
-			kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
-		if (kvm_check_request(KVM_REQ_PMU, vcpu))
+			break;
+		case KVM_REQ_IMMEDIATE_EXIT:
+			req_immediate_exit = true;
+			break;
+		case KVM_REQ_PMU:
 			kvm_handle_pmu_event(vcpu);
-		if (kvm_check_request(KVM_REQ_PMI, vcpu))
+			break;
+		case KVM_REQ_PMI:
 			kvm_deliver_pmi(vcpu);
-		if (kvm_check_request(KVM_REQ_EVENT, vcpu)) {
+			break;
+		case KVM_REQ_EVENT:
 			inject_pending_event(vcpu);
 
 			/* enable NMI/IRQ window open exits if needed */
@@ -5288,9 +5299,14 @@  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 				update_cr8_intercept(vcpu);
 				kvm_lapic_sync_to_vapic(vcpu);
 			}
+			break;
+		default:
+			BUG();
 		}
 	}
 
+no_requests:
+
 	preempt_disable();
 
 	kvm_x86_ops->prepare_guest_switch(vcpu);