diff mbox

[v3,01/11] KVM: x86: Optimization: Create SVM stubs for sync_pir_to_irr()

Message ID 1514131983-24305-2-git-send-email-liran.alon@oracle.com (mailing list archive)
State New, archived
Headers show

Commit Message

Liran Alon Dec. 24, 2017, 4:12 p.m. UTC
sync_pir_to_irr() is only called if vcpu->arch.apicv_active == true.
In case it is false, VMX code makes sure to set sync_pir_to_irr
to NULL.

Therefore, having SVM stubs allows removing the check for
sync_pir_to_irr != NULL from all call sites.

Signed-off-by: Liran Alon <liran.alon@oracle.com>
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Nikita Leshenko <nikita.leshchenko@oracle.com>
Reviewed-by: Liam Merwick <liam.merwick@oracle.com>
Signed-off-by: Liam Merwick <liam.merwick@oracle.com>
---
 arch/x86/kvm/lapic.c |  2 +-
 arch/x86/kvm/svm.c   |  6 ++++++
 arch/x86/kvm/x86.c   | 10 ++++------
 3 files changed, 11 insertions(+), 7 deletions(-)

Comments

Paolo Bonzini Dec. 27, 2017, 9:56 a.m. UTC | #1
On 24/12/2017 17:12, Liran Alon wrote:
> -	if (kvm_x86_ops->sync_pir_to_irr && apic->vcpu->arch.apicv_active)
> +	if (apic->vcpu->arch.apicv_active)
>  		highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
>  	else
>  		highest_irr = apic_find_highest_irr(apic);
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index eb714f1cdf7e..99c42deb742b 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -4449,6 +4449,11 @@ static void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
>  {
>  }
>  
> +static int svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
> +{
> +	return -1;
> +}

Shouldn't this be

	return kvm_lapic_find_highest_irr(vcpu);

?

Paolo
Liran Alon Dec. 27, 2017, 10:01 a.m. UTC | #2
On 27/12/17 11:56, Paolo Bonzini wrote:
> On 24/12/2017 17:12, Liran Alon wrote:
>> -	if (kvm_x86_ops->sync_pir_to_irr && apic->vcpu->arch.apicv_active)
>> +	if (apic->vcpu->arch.apicv_active)
>>   		highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
>>   	else
>>   		highest_irr = apic_find_highest_irr(apic);
>> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
>> index eb714f1cdf7e..99c42deb742b 100644
>> --- a/arch/x86/kvm/svm.c
>> +++ b/arch/x86/kvm/svm.c
>> @@ -4449,6 +4449,11 @@ static void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
>>   {
>>   }
>>
>> +static int svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
>> +{
>> +	return -1;
>> +}
>
> Shouldn't this be
>
> 	return kvm_lapic_find_highest_irr(vcpu);
>
> ?
>
> Paolo
>

Yes you are correct. My bad.
This would break apic_has_interrupt_for_ppr() when apicv_active is
true on AMD SVM.

If there are more comments justifying a v4 series, I would fix it in that 
series. Otherwise, could you just change this commit when applying?

Thanks,
-Liran
diff mbox

Patch

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index e2c1fb8d35ce..0928608750e3 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -581,7 +581,7 @@  static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
 static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
 {
 	int highest_irr;
-	if (kvm_x86_ops->sync_pir_to_irr && apic->vcpu->arch.apicv_active)
+	if (apic->vcpu->arch.apicv_active)
 		highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
 	else
 		highest_irr = apic_find_highest_irr(apic);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index eb714f1cdf7e..99c42deb742b 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4449,6 +4449,11 @@  static void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
 {
 }
 
+static int svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+{
+	return -1;
+}
+
 /* Note: Currently only used by Hyper-V. */
 static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 {
@@ -5581,6 +5586,7 @@  static int enable_smi_window(struct kvm_vcpu *vcpu)
 	.load_eoi_exitmap = svm_load_eoi_exitmap,
 	.hwapic_irr_update = svm_hwapic_irr_update,
 	.hwapic_isr_update = svm_hwapic_isr_update,
+	.sync_pir_to_irr = svm_sync_pir_to_irr,
 	.apicv_post_state_restore = avic_post_state_restore,
 
 	.set_tss_addr = svm_set_tss_addr,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index faf843c9b916..82750791153e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2943,7 +2943,7 @@  void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
 				    struct kvm_lapic_state *s)
 {
-	if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
+	if (vcpu->arch.apicv_active)
 		kvm_x86_ops->sync_pir_to_irr(vcpu);
 
 	return kvm_apic_get_state(vcpu, s);
@@ -6749,7 +6749,7 @@  static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 	if (irqchip_split(vcpu->kvm))
 		kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
 	else {
-		if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
+		if (vcpu->arch.apicv_active)
 			kvm_x86_ops->sync_pir_to_irr(vcpu);
 		kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
 	}
@@ -6981,10 +6981,8 @@  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	 * This handles the case where a posted interrupt was
 	 * notified with kvm_vcpu_kick.
 	 */
-	if (kvm_lapic_enabled(vcpu)) {
-		if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
-			kvm_x86_ops->sync_pir_to_irr(vcpu);
-	}
+	if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
+		kvm_x86_ops->sync_pir_to_irr(vcpu);
 
 	if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
 	    || need_resched() || signal_pending(current)) {