diff mbox series

[v2,11/12] KVM: SVM: Do not inhibit APICv when x2APIC is present

Message ID 20220412115822.14351-12-suravee.suthikulpanit@amd.com (mailing list archive)
State New, archived
Headers show
Series Introducing AMD x2APIC Virtualization (x2AVIC) support | expand

Commit Message

Suthikulpanit, Suravee April 12, 2022, 11:58 a.m. UTC
Currently, AVIC is inhibited when booting a VM w/ x2APIC support.
This is because AVIC cannot virtualize x2APIC mode in the VM.
With x2AVIC support, the APICV_INHIBIT_REASON_X2APIC is
no longer enforced.

Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---
 arch/x86/kvm/svm/avic.c | 19 +++++++++++++++++++
 arch/x86/kvm/svm/svm.c  | 17 ++---------------
 arch/x86/kvm/svm/svm.h  |  1 +
 3 files changed, 22 insertions(+), 15 deletions(-)

Comments

Maxim Levitsky April 19, 2022, 1:29 p.m. UTC | #1
On Tue, 2022-04-12 at 06:58 -0500, Suravee Suthikulpanit wrote:
> Currently, AVIC is inhibited when booting a VM w/ x2APIC support.
> This is because AVIC cannot virtualize x2APIC mode in the VM.
> With x2AVIC support, the APICV_INHIBIT_REASON_X2APIC is
> no longer enforced.
> 
> Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
> ---
>  arch/x86/kvm/svm/avic.c | 19 +++++++++++++++++++
>  arch/x86/kvm/svm/svm.c  | 17 ++---------------
>  arch/x86/kvm/svm/svm.h  |  1 +
>  3 files changed, 22 insertions(+), 15 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
> index 085a82e95cb0..abcf761c0c53 100644
> --- a/arch/x86/kvm/svm/avic.c
> +++ b/arch/x86/kvm/svm/avic.c
> @@ -21,6 +21,7 @@
>  
>  #include <asm/irq_remapping.h>
>  
> +#include "cpuid.h"
>  #include "trace.h"
>  #include "lapic.h"
>  #include "x86.h"
> @@ -159,6 +160,24 @@ void avic_vm_destroy(struct kvm *kvm)
>  	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
>  }
>  
> +void avic_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu, int nested)
> +{
> +	/*
> +	 * If the X2APIC feature is exposed to the guest,
> +	 * disable AVIC unless X2AVIC mode is enabled.
> +	 */
> +	if (avic_mode == AVIC_MODE_X1 &&
> +	    guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
> +		kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_X2APIC);
> +
> +	/*
> +	 * Currently, AVIC does not work with nested virtualization.
> +	 * So, we disable AVIC when cpuid for SVM is set in the L1 guest.
> +	 */
> +	if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
> +		kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_NESTED);
> +}
> +
>  int avic_vm_init(struct kvm *kvm)
>  {
>  	unsigned long flags;
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index b7dbd8bb2c0a..931998d1d8c4 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -3961,7 +3961,6 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
>  {
>  	struct vcpu_svm *svm = to_svm(vcpu);
>  	struct kvm_cpuid_entry2 *best;
> -	struct kvm *kvm = vcpu->kvm;
>  
>  	vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
>  				    boot_cpu_has(X86_FEATURE_XSAVE) &&
> @@ -3982,21 +3981,9 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
>  			vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
>  	}
>  
> -	if (kvm_vcpu_apicv_active(vcpu)) {
> -		/*
> -		 * AVIC does not work with an x2APIC mode guest. If the X2APIC feature
> -		 * is exposed to the guest, disable AVIC.
> -		 */
> -		if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
> -			kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_X2APIC);
> +	if (kvm_vcpu_apicv_active(vcpu))
> +		avic_vcpu_after_set_cpuid(vcpu, nested);
>  
> -		/*
> -		 * Currently, AVIC does not work with nested virtualization.
> -		 * So, we disable AVIC when cpuid for SVM is set in the L1 guest.
> -		 */
> -		if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
> -			kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_NESTED);
> -	}
>  	init_vmcb_after_set_cpuid(vcpu);
>  }
>  
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index e340c86941be..0312eec7c7f5 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -583,6 +583,7 @@ int avic_init_vcpu(struct vcpu_svm *svm);
>  void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
>  void __avic_vcpu_put(struct kvm_vcpu *vcpu);
>  void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
> +void avic_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu, int nested);
>  void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
>  void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
>  bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason);

Hi!


I just got an idea, while writing a kvm selftest that would use AVIC,
and finding out that selftest code uploads the '-host' cpuid right away
which has x2apic enabled and that inhibits AVIC, and later clearing x2apic
in the cpuid doesn't un-inhibit it.
 
That can be fixed in few ways but that got me thinking:
 
Why do we inhibit AVIC when the guest uses x2apic, even without X2AVIC?
I think that if we didn't it would just work, and even work faster than
pure software x2apic.
 
My thinking is:
 
- when a vcpu itself uses its x2apic, even if its avic is not inhibited, 
the guest will write x2apic msrs which kvm intercepts and will correctly emulate a proper x2apic.
 
- vcpu peers will also use x2apic msrs and again it will work correctly 
(even when there are more than 256 vcpus).
 
- and the host + iommu will still be able to use AVIC's doorbell to send interrupts to the guest
and that doesn't need apic ids or anything, it should work just fine. 

Also AVIC should have no issues scanning IRR and injecting interrupts on VM entry, 
x2apic mode doesn't matter for that.
 
AVIC mmio can still, though, be discovered by the guest, which is technically against the x86 spec
(in x2apic mode, mmio is supposed to not work), but that can be fixed easily by disabling
the AVIC memslot if any of the vCPUs are in x2apic mode, or this can be ignored since
it should not cause any issues.
We seem to have a quirk for that KVM_X86_QUIRK_LAPIC_MMIO_HOLE.
 
On top of all this, removing this inhibit will also allow to test AVIC with guest
which does have x2apic in the CPUID but doesn't use it (e.g kvm unit test, or
linux booted with nox2apic, which is also nice IMHO)
 
What do you think?

Best regards,
	Maxim Levitsky
Suthikulpanit, Suravee April 26, 2022, 2:25 a.m. UTC | #2
Hi Maxim,

On 4/19/22 8:29 PM, Maxim Levitsky wrote:
> On Tue, 2022-04-12 at 06:58 -0500, Suravee Suthikulpanit wrote:
> 
> Hi!
> 
> 
> I just got an idea, while writing a kvm selftest that would use AVIC,
> and finding out that selftest code uploads the '-host' cpuid right away
> which has x2apic enabled and that inhibits AVIC, and later clearing x2apic
> in the cpuid doesn't un-inhibit it.
>   
> That can be fixed in few ways but that got me thinking:
>   
> Why do we inhibit AVIC when the guest uses x2apic, even without X2AVIC?
> I think that if we didn't it would just work, and even work faster than
> pure software x2apic.
>   
> My thinking is:
>   
> - when a vcpu itself uses its x2apic, even if its avic is not inhibited,
> the guest will write x2apic msrs which kvm intercepts and will correctly emulate a proper x2apic.
>   
> - vcpu peers will also use x2apic msrs and again it will work correctly
> (even when there are more than 256 vcpus).
>   
> - and the host + iommu will still be able to use AVIC's doorbell to send interrupts to the guest
> and that doesn't need apic ids or anything, it should work just fine.
> 
> Also AVIC should have no issues scanning IRR and injecting interrupts on VM entry,
> x2apic mode doesn't matter for that.
>   
> AVIC mmio can still, though, be discovered by the guest, which is technically against the x86 spec
> (in x2apic mode, mmio is supposed to not work), but that can be fixed easily by disabling
> the AVIC memslot if any of the vCPUs are in x2apic mode, or this can be ignored since
> it should not cause any issues.
> We seem to have a quirk for that KVM_X86_QUIRK_LAPIC_MMIO_HOLE.
>   
> On top of all this, removing this inhibit will also allow to test AVIC with guest
> which does have x2apic in the CPUID but doesn't use it (e.g kvm unit test, or
> linux booted with nox2apic, which is also nice IMHO)
>   
> What do you think?

This is actually a good idea!!! Let's call it hybrid-x2AVIC :)

I am working on a prototype and testing out the support for this, which will be introduced in V3.

Regards,
Suravee
Maxim Levitsky April 26, 2022, 7:06 a.m. UTC | #3
On Tue, 2022-04-26 at 09:25 +0700, Suravee Suthikulpanit wrote:
> Hi Maxim,
> 
> On 4/19/22 8:29 PM, Maxim Levitsky wrote:
> > On Tue, 2022-04-12 at 06:58 -0500, Suravee Suthikulpanit wrote:
> > 
> > Hi!
> > 
> > 
> > I just got an idea, while writing a kvm selftest that would use AVIC,
> > and finding out that selftest code uploads the '-host' cpuid right away
> > which has x2apic enabled and that inhibits AVIC, and later clearing x2apic
> > in the cpuid doesn't un-inhibit it.
> >   
> > That can be fixed in few ways but that got me thinking:
> >   
> > Why do we inhibit AVIC when the guest uses x2apic, even without X2AVIC?
> > I think that if we didn't it would just work, and even work faster than
> > pure software x2apic.
> >   
> > My thinking is:
> >   
> > - when a vcpu itself uses its x2apic, even if its avic is not inhibited,
> > the guest will write x2apic msrs which kvm intercepts and will correctly emulate a proper x2apic.
> >   
> > - vcpu peers will also use x2apic msrs and again it will work correctly
> > (even when there are more than 256 vcpus).
> >   
> > - and the host + iommu will still be able to use AVIC's doorbell to send interrupts to the guest
> > and that doesn't need apic ids or anything, it should work just fine.
> > 
> > Also AVIC should have no issues scanning IRR and injecting interrupts on VM entry,
> > x2apic mode doesn't matter for that.
> >   
> > AVIC mmio can still, though, be discovered by the guest, which is technically against the x86 spec
> > (in x2apic mode, mmio is supposed to not work), but that can be fixed easily by disabling
> > the AVIC memslot if any of the vCPUs are in x2apic mode, or this can be ignored since
> > it should not cause any issues.
> > We seem to have a quirk for that KVM_X86_QUIRK_LAPIC_MMIO_HOLE.
> >   
> > On top of all this, removing this inhibit will also allow to test AVIC with guest
> > which does have x2apic in the CPUID but doesn't use it (e.g kvm unit test, or
> > linux booted with nox2apic, which is also nice IMHO)
> >   
> > What do you think?
> 
> This is actually a good idea!!! Let's call it hybrid-x2AVIC :)
> 
> I am working on a prototype and testing out the support for this, which will be introduced in V3.

Thanks! 

Best regards,
	Maxim Levitsky

> 
> Regards,
> Suravee
>
Maxim Levitsky April 26, 2022, 9:56 a.m. UTC | #4
On Tue, 2022-04-26 at 10:06 +0300, Maxim Levitsky wrote:
> On Tue, 2022-04-26 at 09:25 +0700, Suravee Suthikulpanit wrote:
> > Hi Maxim,
> > 
> > On 4/19/22 8:29 PM, Maxim Levitsky wrote:
> > > On Tue, 2022-04-12 at 06:58 -0500, Suravee Suthikulpanit wrote:
> > > 
> > > Hi!
> > > 
> > > 
> > > I just got an idea, while writing a kvm selftest that would use AVIC,
> > > and finding out that selftest code uploads the '-host' cpuid right away
> > > which has x2apic enabled and that inhibits AVIC, and later clearing x2apic
> > > in the cpuid doesn't un-inhibit it.
> > >   
> > > That can be fixed in few ways but that got me thinking:
> > >   
> > > Why do we inhibit AVIC when the guest uses x2apic, even without X2AVIC?
> > > I think that if we didn't it would just work, and even work faster than
> > > pure software x2apic.
> > >   
> > > My thinking is:
> > >   
> > > - when a vcpu itself uses its x2apic, even if its avic is not inhibited,
> > > the guest will write x2apic msrs which kvm intercepts and will correctly emulate a proper x2apic.
> > >   
> > > - vcpu peers will also use x2apic msrs and again it will work correctly
> > > (even when there are more than 256 vcpus).
> > >   
> > > - and the host + iommu will still be able to use AVIC's doorbell to send interrupts to the guest
> > > and that doesn't need apic ids or anything, it should work just fine.
> > > 
> > > Also AVIC should have no issues scanning IRR and injecting interrupts on VM entry,
> > > x2apic mode doesn't matter for that.
> > >   
> > > AVIC mmio can still, though, be discovered by the guest, which is technically against the x86 spec
> > > (in x2apic mode, mmio is supposed to not work), but that can be fixed easily by disabling
> > > the AVIC memslot if any of the vCPUs are in x2apic mode, or this can be ignored since
> > > it should not cause any issues.
> > > We seem to have a quirk for that KVM_X86_QUIRK_LAPIC_MMIO_HOLE.
> > >   
> > > On top of all this, removing this inhibit will also allow to test AVIC with guest
> > > which does have x2apic in the CPUID but doesn't use it (e.g kvm unit test, or
> > > linux booted with nox2apic, which is also nice IMHO)
> > >   
> > > What do you think?
> > 
> > This is actually a good idea!!! Let's call it hybrid-x2AVIC :)
> > 
> > I am working on a prototype and testing out the support for this, which will be introduced in V3.
> 
> Thanks! 
> 
> Best regards,
> 	Maxim Levitsky
> 
> > Regards,
> > Suravee
> > 

BTW, can I ask you to check something on the AMD side of things of AVIC?

I noticed that AMD's manual states that:

"Multiprocessor VM requirements. When running a VM which has multiple virtual CPUs, and the
VMM runs a virtual CPU on a core which had last run a different virtual CPU from the same VM,
regardless of the respective ASID values, care must be taken to flush the TLB on the VMRUN using a
TLB_CONTROL value of 3h. Failure to do so may result in stale mappings misdirecting virtual APIC
accesses to the previous virtual CPU's APIC backing page."

Is it relevant to KVM? I don't fully understand why it was mentioned that ASID doesn't matter,
what makes it special about 'virtual CPU from the same VM' if ASID doesn't matter? 

Also, is this still relevant on modern AMD cpus, or was a workaround for some old CPU bug?

Best regards,
	Maxim Levitsky
Sean Christopherson April 29, 2022, 5 p.m. UTC | #5
On Tue, Apr 26, 2022, Maxim Levitsky wrote:
> On Tue, 2022-04-26 at 10:06 +0300, Maxim Levitsky wrote:
> BTW, can I ask you to check something on the AMD side of things of AVIC?
> 
> I noticed that AMD's manual states that:
> 
> "Multiprocessor VM requirements. When running a VM which has multiple virtual CPUs, and the
> VMM runs a virtual CPU on a core which had last run a different virtual CPU from the same VM,
> regardless of the respective ASID values, care must be taken to flush the TLB on the VMRUN using a
> TLB_CONTROL value of 3h. Failure to do so may result in stale mappings misdirecting virtual APIC
> accesses to the previous virtual CPU's APIC backing page."
> 
> Is it relevant to KVM? I don't fully understand why it was mentioned that ASID doesn't matter,
> what makes it special about 'virtual CPU from the same VM' if ASID doesn't matter? 

I believe it's calling out that, because vCPUs from the same VM likely share an ASID,
the magic TLB entry for the APIC-access page, which redirects to the virtual APIC page,
will be preserved.  And so if the hypervisor doesn't flush the ASID/TLB, accelerated
xAPIC accesses for the new vCPU will go to the previous vCPU's virtual APIC page.

Intel has the same requirement, though this specific scenario isn't as well documented.
E.g. even if using EPT and VPID, the EPT still needs to be invalidated because the
TLB can cache guest-physical mappings, which are not associated with a VPID.

Huh.  I was going to say that KVM does the necessary flushes in vmx_vcpu_load_vmcs()
and pre_svm_run(), but I don't think that's true.  KVM flushes if the _new_ VMCS/VMCB
is being migrated to a different pCPU, but neither VMX nor SVM flush when switching
between vCPUs that are both "loaded" on the current pCPU.

Switching between vmcs01 and vmcs02 is ok, because KVM always forces a different
EPTP, even if L1 is using shadow paging (the guest_mode bit in the role prevents
reusing a root).  nSVM is "ok" because it flushes on every transition anyways.
Maxim Levitsky May 1, 2022, 6:49 a.m. UTC | #6
On Fri, 2022-04-29 at 17:00 +0000, Sean Christopherson wrote:
> On Tue, Apr 26, 2022, Maxim Levitsky wrote:
> > On Tue, 2022-04-26 at 10:06 +0300, Maxim Levitsky wrote:
> > BTW, can I ask you to check something on the AMD side of things of AVIC?
> > 
> > I noticed that AMD's manual states that:
> > 
> > "Multiprocessor VM requirements. When running a VM which has multiple virtual CPUs, and the
> > VMM runs a virtual CPU on a core which had last run a different virtual CPU from the same VM,
> > regardless of the respective ASID values, care must be taken to flush the TLB on the VMRUN using a
> > TLB_CONTROL value of 3h. Failure to do so may result in stale mappings misdirecting virtual APIC
> > accesses to the previous virtual CPU's APIC backing page."
> > 
> > Is it relevant to KVM? I don't fully understand why it was mentioned that ASID doesn't matter,
> > what makes it special about 'virtual CPU from the same VM' if ASID doesn't matter? 
> 
> I believe it's calling out that, because vCPUs from the same VM likely share an ASID,
> the magic TLB entry for the APIC-access page, which redirects to the virtual APIC page,
> will be preserved.  And so if the hypervisor doesn't flush the ASID/TLB, accelerated
> xAPIC accesses for the new vCPU will go to the previous vCPU's virtual APIC page.

This is what I want to think as well, but the manual says explicitly 
"regardless of the respective ASID values"

On the face value of it, the only logical way to read this IMHO,
is that every time apic backing page is changed, we need to issue a TLB flush.

Best regards,
	Maxim Levitsky

> 
> Intel has the same requirement, though this specific scenario isn't as well documented.
> E.g. even if using EPT and VPID, the EPT still needs to be invalidated because the
> TLB can cache guest-physical mappings, which are not associated with a VPID.
> 
> Huh.  I was going to say that KVM does the necessary flushes in vmx_vcpu_load_vmcs()
> and pre_svm_run(), but I don't think that's true.  KVM flushes if the _new_ VMCS/VMCB
> is being migrated to a different pCPU, but neither VMX nor SVM flush when switching
> between vCPUs that are both "loaded" on the current pCPU.
> 
> Switching between vmcs01 and vmcs02 is ok, because KVM always forces a different
> EPTP, even if L1 is using shadow paging (the guest_mode bit in the role prevents
> reusing a root).  nSVM is "ok" because it flushes on every transition anyways.
>
diff mbox series

Patch

diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 085a82e95cb0..abcf761c0c53 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -21,6 +21,7 @@ 
 
 #include <asm/irq_remapping.h>
 
+#include "cpuid.h"
 #include "trace.h"
 #include "lapic.h"
 #include "x86.h"
@@ -159,6 +160,24 @@  void avic_vm_destroy(struct kvm *kvm)
 	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
 }
 
+void avic_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu, int nested)
+{
+	/*
+	 * If the X2APIC feature is exposed to the guest,
+	 * disable AVIC unless X2AVIC mode is enabled.
+	 */
+	if (avic_mode == AVIC_MODE_X1 &&
+	    guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
+		kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_X2APIC);
+
+	/*
+	 * Currently, AVIC does not work with nested virtualization.
+	 * So, we disable AVIC when cpuid for SVM is set in the L1 guest.
+	 */
+	if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+		kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_NESTED);
+}
+
 int avic_vm_init(struct kvm *kvm)
 {
 	unsigned long flags;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index b7dbd8bb2c0a..931998d1d8c4 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3961,7 +3961,6 @@  static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct kvm_cpuid_entry2 *best;
-	struct kvm *kvm = vcpu->kvm;
 
 	vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
 				    boot_cpu_has(X86_FEATURE_XSAVE) &&
@@ -3982,21 +3981,9 @@  static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 			vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
 	}
 
-	if (kvm_vcpu_apicv_active(vcpu)) {
-		/*
-		 * AVIC does not work with an x2APIC mode guest. If the X2APIC feature
-		 * is exposed to the guest, disable AVIC.
-		 */
-		if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
-			kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_X2APIC);
+	if (kvm_vcpu_apicv_active(vcpu))
+		avic_vcpu_after_set_cpuid(vcpu, nested);
 
-		/*
-		 * Currently, AVIC does not work with nested virtualization.
-		 * So, we disable AVIC when cpuid for SVM is set in the L1 guest.
-		 */
-		if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
-			kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_NESTED);
-	}
 	init_vmcb_after_set_cpuid(vcpu);
 }
 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index e340c86941be..0312eec7c7f5 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -583,6 +583,7 @@  int avic_init_vcpu(struct vcpu_svm *svm);
 void __avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 void __avic_vcpu_put(struct kvm_vcpu *vcpu);
 void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
+void avic_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu, int nested);
 void avic_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
 void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
 bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason);