
[5/6] KVM: x86: do not scan IRR twice on APICv vmentry

Message ID 20170207214946.GF31091@potion (mailing list archive)
State New, archived

Commit Message

Radim Krčmář Feb. 7, 2017, 9:49 p.m. UTC
2017-02-07 21:19+0100, Radim Krčmář:
> 2016-12-19 17:17+0100, Paolo Bonzini:
> > Calls to apic_find_highest_irr are scanning IRR twice, once
> > in vmx_sync_pir_to_irr and once in apic_search_irr.  Change
> > sync_pir_to_irr to get the new maximum IRR from kvm_apic_update_irr;
> > now that it does the computation, it can also do the RVI write.
> > 
> > In order to avoid complications in svm.c, make the callback optional.
> > 
> > Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> > ---
> > diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> > @@ -8734,20 +8736,24 @@ static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
> >  	}
> >  }
> >  
> > -static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
> > +static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
> >  {
> >  	struct vcpu_vmx *vmx = to_vmx(vcpu);
> > +	int max_irr;
> >  
> > -	if (!pi_test_on(&vmx->pi_desc))
> > -		return;
> > -
> > -	pi_clear_on(&vmx->pi_desc);
> > -	/*
> > -	 * IOMMU can write to PIR.ON, so the barrier matters even on UP.
> > -	 * But on x86 this is just a compiler barrier anyway.
> > -	 */
> > -	smp_mb__after_atomic();
> > -	kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
> > +	if (vcpu->arch.apicv_active && pi_test_on(&vmx->pi_desc)) {
> > +		pi_clear_on(&vmx->pi_desc);
> > +		/*
> > +		 * IOMMU can write to PIR.ON, so the barrier matters even on UP.
> > +		 * But on x86 this is just a compiler barrier anyway.
> > +		 */
> > +		smp_mb__after_atomic();
> > +		max_irr = kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
> > +	} else {
> > +		max_irr = kvm_lapic_find_highest_irr(vcpu);
> > +	}
> > +	vmx_hwapic_irr_update(vcpu, max_irr);
> 
> Btw. a v1 discussion revolved around the need to have
> vmx_hwapic_irr_update() here when the maximal IRR should always be in
> RVI, and, uh, I didn't follow up (negligible attention span) ...
> 
> There is one place where that doesn't hold: we don't update RVI after an
> EXTERNAL_INTERRUPT nested VM exit without VM_EXIT_ACK_INTR_ON_EXIT, but
> IRR has likely changed.  Isn't that the problem?
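
(An aside on the quoted commit message, as a stand-alone sketch that is
not kernel code -- the helpers and vector numbers below are made up for
illustration and only loosely mirror kvm_apic_update_irr() and
apic_find_highest_irr().  The point is that merging PIR into IRR and
computing the new maximum in the same pass makes a second 256-bit IRR
scan unnecessary.)

#include <stdint.h>
#include <stdio.h>

#define VECS	256
#define WORDS	(VECS / 32)

/* one full scan of IRR -- the pass the patch wants to avoid repeating */
static int find_highest_irr(const uint32_t irr[WORDS])
{
	for (int w = WORDS - 1; w >= 0; w--)
		if (irr[w])
			return w * 32 + 31 - __builtin_clz(irr[w]);
	return -1;
}

/* merge PIR into IRR and report the new maximum as a side effect */
static int update_irr(uint32_t irr[WORDS], const uint32_t pir[WORDS])
{
	int max_irr = -1;

	for (int w = WORDS - 1; w >= 0; w--) {
		irr[w] |= pir[w];
		if (max_irr < 0 && irr[w])
			max_irr = w * 32 + 31 - __builtin_clz(irr[w]);
	}
	return max_irr;
}

int main(void)
{
	uint32_t irr[WORDS] = { [1] = 1u << 3 };	/* vector 35 already in IRR */
	uint32_t pir[WORDS] = { [2] = 1u << 0 };	/* vector 64 posted in PIR  */
	int max_irr = update_irr(irr, pir);

	/* the rescan only confirms what the merge already computed */
	printf("max IRR after merge: %d (rescan: %d)\n",
	       max_irr, find_highest_irr(irr));
	return 0;
}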

The following patch on top of the whole series survives kvm-unit-tests
and very minimal testing (I am not sure whether I have missed something):

Patch

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index a1e9cab7d01f..8b98c1681803 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -577,10 +577,10 @@  static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
 
 static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
 {
-	int highest_irr;
+	int highest_irr = -1;
 	if (kvm_x86_ops->sync_pir_to_irr)
 		highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
-	else
+	if (highest_irr == -1)
 		highest_irr = apic_find_highest_irr(apic);
 	if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
 		return -1;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 9c8a16edf88d..637c7bd2f3ab 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8728,18 +8728,18 @@  static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int max_irr;
 
-	if (vcpu->arch.apicv_active && pi_test_on(&vmx->pi_desc)) {
-		pi_clear_on(&vmx->pi_desc);
-		/*
-		 * IOMMU can write to PIR.ON, so the barrier matters even on UP.
-		 * But on x86 this is just a compiler barrier anyway.
-		 */
-		smp_mb__after_atomic();
-		max_irr = kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
-	} else {
-		max_irr = kvm_lapic_find_highest_irr(vcpu);
-	}
+	if (!vcpu->arch.apicv_active || !pi_test_on(&vmx->pi_desc))
+		return -1;
+
+	pi_clear_on(&vmx->pi_desc);
+	/*
+	 * IOMMU can write to PIR.ON, so the barrier matters even on UP.
+	 * But on x86 this is just a compiler barrier anyway.
+	 */
+	smp_mb__after_atomic();
+	max_irr = kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
 	vmx_hwapic_irr_update(vcpu, max_irr);
+
 	return max_irr;
 }
 
@@ -11145,6 +11145,7 @@  static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 
 	/* in case we halted in L2 */
 	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+	kvm_x86_ops->hwapic_irr_update(vcpu, kvm_lapic_find_highest_irr(vcpu));
 }
 
 /*
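
The resulting contract, roughly (again a stand-alone sketch, not kernel
code -- the stub names are invented): a -1 return from the optional
sync_pir_to_irr callback now means "nothing was merged from PIR, scan IRR
yourself", so SVM (which has no callback) and VMX with PIR.ON clear land
on the same fallback path, while the hunk in nested_vmx_vmexit() refreshes
RVI explicitly because that exit path can change IRR without going through
the callback.

#include <stdio.h>

/* stands in for apic_find_highest_irr(); pretend vector 35 is pending */
static int scan_irr(void)
{
	return 35;
}

/* stands in for vmx_sync_pir_to_irr() when PIR.ON was not set */
static int sync_pir_to_irr_empty(void)
{
	return -1;
}

/* mirrors the fallback in apic_has_interrupt_for_ppr() after the patch */
static int highest_pending(int (*sync_pir_to_irr)(void))
{
	int highest_irr = -1;

	if (sync_pir_to_irr)
		highest_irr = sync_pir_to_irr();
	if (highest_irr == -1)		/* no callback, or nothing posted */
		highest_irr = scan_irr();
	return highest_irr;
}

int main(void)
{
	printf("VMX, empty PIR: %d\n", highest_pending(sync_pir_to_irr_empty));
	printf("SVM, no callback: %d\n", highest_pending(NULL));
	return 0;
}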