diff mbox series

[RFC,1/9] KVM:x86: Abstract sub functions from kvm_update_cpuid_runtime() and kvm_vcpu_after_set_cpuid()

Message ID 1596163347-18574-2-git-send-email-robert.hu@linux.intel.com (mailing list archive)
State New, archived
Headers show
Series Split kvm_update_cpuid_runtime() | expand

Commit Message

Robert Hoo July 31, 2020, 2:42 a.m. UTC
Add below functions, whose aggregation equals kvm_update_cpuid_runtime() and
kvm_vcpu_after_set_cpuid().

void kvm_osxsave_update_cpuid(struct kvm_vcpu *vcpu, bool set)
void kvm_pke_update_cpuid(struct kvm_vcpu *vcpu, bool set)
void kvm_apic_base_update_cpuid(struct kvm_vcpu *vcpu, bool set)
void kvm_mwait_update_cpuid(struct kvm_vcpu *vcpu, bool set)
void kvm_xcr0_update_cpuid(struct kvm_vcpu *vcpu)
static void kvm_pv_unhalt_update_cpuid(struct kvm_vcpu *vcpu)
static void kvm_update_maxphyaddr(struct kvm_vcpu *vcpu)
static void kvm_update_lapic_timer_mode(struct kvm_vcpu *vcpu)

And, for some among the above, avoid double check set or clear inside function
body, but provided by caller.

Signed-off-by: Robert Hoo <robert.hu@linux.intel.com>
---
 arch/x86/kvm/cpuid.c | 99 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/cpuid.h |  6 ++++
 2 files changed, 105 insertions(+)

Comments

Sean Christopherson Sept. 29, 2020, 4:56 a.m. UTC | #1
I think you want "extract", not "abstract".


On Fri, Jul 31, 2020 at 10:42:19AM +0800, Robert Hoo wrote:
> Add below functions, whose aggregation equals kvm_update_cpuid_runtime() and
> kvm_vcpu_after_set_cpuid().
> 
> void kvm_osxsave_update_cpuid(struct kvm_vcpu *vcpu, bool set)
> void kvm_pke_update_cpuid(struct kvm_vcpu *vcpu, bool set)
> void kvm_apic_base_update_cpuid(struct kvm_vcpu *vcpu, bool set)
> void kvm_mwait_update_cpuid(struct kvm_vcpu *vcpu, bool set)
> void kvm_xcr0_update_cpuid(struct kvm_vcpu *vcpu)
> static void kvm_pv_unhalt_update_cpuid(struct kvm_vcpu *vcpu)
> static void kvm_update_maxphyaddr(struct kvm_vcpu *vcpu)
> static void kvm_update_lapic_timer_mode(struct kvm_vcpu *vcpu)
> 
> And, for some among the above, avoid double check set or clear inside function
> body, but provided by caller.
> 
> Signed-off-by: Robert Hoo <robert.hu@linux.intel.com>
> ---
>  arch/x86/kvm/cpuid.c | 99 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>  arch/x86/kvm/cpuid.h |  6 ++++
>  2 files changed, 105 insertions(+)
> 
> diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
> index 7d92854..efa7182 100644
> --- a/arch/x86/kvm/cpuid.c
> +++ b/arch/x86/kvm/cpuid.c
> @@ -73,6 +73,105 @@ static int kvm_check_cpuid(struct kvm_vcpu *vcpu)
>  	return 0;
>  }
>  
> +void kvm_osxsave_update_cpuid(struct kvm_vcpu *vcpu, bool set)
> +{
> +	struct kvm_cpuid_entry2 *best;

I vote that we opportunistically move away from the "best" terminology.  Either
there's a matching entry or there's not.  Using "e" would probably shave a few
lines of code.

> +
> +	best = kvm_find_cpuid_entry(vcpu, 1, 0);
> +
> +	if (best && boot_cpu_has(X86_FEATURE_XSAVE)) {

Braces not needed. We should also check boot_cpu_has() first, it's constant
time and maybe even in the cache, whereas finding CPUID entries is linear
time and outright slow.

Actually, can you add a helper to handle this?  With that boot_cpu_has() check
outside of the helper?  That'll allow the helper to be used for more features,
and will force checking boot_cpu_has() first.  Hmm, and not all of the callers
will need the boot_cpu_has() check, e.g. toggling PKE from kvm_set_cr4()
doesn't need to be guarded because KVM disallows setting PKE if it's not
supported by the host.

static inline void guest_cpuid_change(struct kvm_vcpu *vcpu, u32 function,
				      u32 index, unsigned int feature, bool set)
{
	struct kvm_cpuid_entry2 *e =  kvm_find_cpuid_entry(vcpu, function, index);

	if (e)
		cpuid_entry_change(best, X86_FEATURE_OSXSAVE, set);
}

> +		cpuid_entry_change(best, X86_FEATURE_OSXSAVE, set);
> +	}
> +}
> +
> +void kvm_pke_update_cpuid(struct kvm_vcpu *vcpu, bool set)
> +{
> +	struct kvm_cpuid_entry2 *best;
> +
> +	best = kvm_find_cpuid_entry(vcpu, 7, 0);
> +
> +	if (best && boot_cpu_has(X86_FEATURE_PKU)) {
> +		cpuid_entry_change(best, X86_FEATURE_OSPKE, set);
> +	}
> +}
> +
> +void kvm_xcr0_update_cpuid(struct kvm_vcpu *vcpu)
> +{
> +	struct kvm_cpuid_entry2 *best;
> +
> +	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
> +	if (!best) {
> +		vcpu->arch.guest_supported_xcr0 = 0;
> +	} else {
> +		vcpu->arch.guest_supported_xcr0 =
> +			(best->eax | ((u64)best->edx << 32)) & supported_xcr0;
> +		best->ebx = xstate_required_size(vcpu->arch.xcr0, false);
> +	}
> +
> +	best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
> +	if (!best)
> +		return;
> +	if (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
> +				cpuid_entry_has(best, X86_FEATURE_XSAVEC))

Indentation should be aligned, e.g.

	if (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
	    cpuid_entry_has(best, X86_FEATURE_XSAVEC))


> +		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
> +}
> +
> +static void kvm_pv_unhalt_update_cpuid(struct kvm_vcpu *vcpu)
> +{
> +	struct kvm_cpuid_entry2 *best;
> +
> +	best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
> +	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
> +					(best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
> +		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
> +}
> +
> +void kvm_mwait_update_cpuid(struct kvm_vcpu *vcpu, bool set)
> +{
> +	struct kvm_cpuid_entry2 *best;
> +
> +	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
> +	if (best)
> +		cpuid_entry_change(best, X86_FEATURE_MWAIT, set);
> +}
> +
> +static void kvm_update_maxphyaddr(struct kvm_vcpu *vcpu)
> +{
> +
> +	/* Note, maxphyaddr must be updated before tdp_level. */
> +	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
> +	vcpu->arch.tdp_level = kvm_x86_ops.get_tdp_level(vcpu);
> +	kvm_mmu_reset_context(vcpu);
> +}
> +
> +void kvm_apic_base_update_cpuid(struct kvm_vcpu *vcpu, bool set)
> +{
> +	struct kvm_cpuid_entry2 *best;
> +
> +	best = kvm_find_cpuid_entry(vcpu, 1, 0);
> +	if (!best)
> +		return;
> +
> +	cpuid_entry_change(best, X86_FEATURE_APIC, set);
> +}
> +
> +static void kvm_update_lapic_timer_mode(struct kvm_vcpu *vcpu)
> +{
> +	struct kvm_cpuid_entry2 *best;
> +	struct kvm_lapic *apic = vcpu->arch.apic;
> +
> +	best = kvm_find_cpuid_entry(vcpu, 1, 0);
> +	if (!best)
> +		return;
> +
> +	if (apic) {

Check apic before the lookup.

> +		if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
> +			apic->lapic_timer.timer_mode_mask = 3 << 17;
> +		else
> +			apic->lapic_timer.timer_mode_mask = 1 << 17;
> +	}
> +}
> +
>  void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
>  {
>  	struct kvm_cpuid_entry2 *best;
> diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
> index 3a923ae..7eabb44 100644
> --- a/arch/x86/kvm/cpuid.h
> +++ b/arch/x86/kvm/cpuid.h
> @@ -9,6 +9,12 @@
>  extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
>  void kvm_set_cpu_caps(void);
>  
> +void kvm_osxsave_update_cpuid(struct kvm_vcpu *vcpu, bool set);
> +void kvm_pke_update_cpuid(struct kvm_vcpu *vcpu, bool set);
> +void kvm_apic_base_update_cpuid(struct kvm_vcpu *vcpu, bool set);
> +void kvm_mwait_update_cpuid(struct kvm_vcpu *vcpu, bool set);
> +void kvm_xcr0_update_cpuid(struct kvm_vcpu *vcpu);
> +
>  void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
>  struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
>  					      u32 function, u32 index);
> -- 
> 1.8.3.1
>
Sean Christopherson Sept. 29, 2020, 4:58 a.m. UTC | #2
On Fri, Jul 31, 2020 at 10:42:19AM +0800, Robert Hoo wrote:
> Add below functions, whose aggregation equals kvm_update_cpuid_runtime() and
> kvm_vcpu_after_set_cpuid().
> 
> void kvm_osxsave_update_cpuid(struct kvm_vcpu *vcpu, bool set)
> void kvm_pke_update_cpuid(struct kvm_vcpu *vcpu, bool set)
> void kvm_apic_base_update_cpuid(struct kvm_vcpu *vcpu, bool set)
> void kvm_mwait_update_cpuid(struct kvm_vcpu *vcpu, bool set)
> void kvm_xcr0_update_cpuid(struct kvm_vcpu *vcpu)
> static void kvm_pv_unhalt_update_cpuid(struct kvm_vcpu *vcpu)
> static void kvm_update_maxphyaddr(struct kvm_vcpu *vcpu)
> static void kvm_update_lapic_timer_mode(struct kvm_vcpu *vcpu)

Ugh, I just reread this, you're adding functions with no callers.  This patch
should replace the existing code so that there are callers, and more
importantly so that we can verify old==new.
Robert Hoo Oct. 20, 2020, 5:38 a.m. UTC | #3
On Mon, 2020-09-28 at 21:56 -0700, Sean Christopherson wrote:
> I think you want "extract", not "abstract".
> 
> 
> On Fri, Jul 31, 2020 at 10:42:19AM +0800, Robert Hoo wrote:
> > Add below functions, whose aggregation equals
> > kvm_update_cpuid_runtime() and
> > kvm_vcpu_after_set_cpuid().
> > 
> > void kvm_osxsave_update_cpuid(struct kvm_vcpu *vcpu, bool set)
> > void kvm_pke_update_cpuid(struct kvm_vcpu *vcpu, bool set)
> > void kvm_apic_base_update_cpuid(struct kvm_vcpu *vcpu, bool set)
> > void kvm_mwait_update_cpuid(struct kvm_vcpu *vcpu, bool set)
> > void kvm_xcr0_update_cpuid(struct kvm_vcpu *vcpu)
> > static void kvm_pv_unhalt_update_cpuid(struct kvm_vcpu *vcpu)
> > static void kvm_update_maxphyaddr(struct kvm_vcpu *vcpu)
> > static void kvm_update_lapic_timer_mode(struct kvm_vcpu *vcpu)
> > 
> > And, for some among the above, avoid double check set or clear
> > inside function
> > body, but provided by caller.
> > 
> > Signed-off-by: Robert Hoo <robert.hu@linux.intel.com>
> > ---
> >  arch/x86/kvm/cpuid.c | 99
> > ++++++++++++++++++++++++++++++++++++++++++++++++++++
> >  arch/x86/kvm/cpuid.h |  6 ++++
> >  2 files changed, 105 insertions(+)
> > 
> > diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
> > index 7d92854..efa7182 100644
> > --- a/arch/x86/kvm/cpuid.c
> > +++ b/arch/x86/kvm/cpuid.c
> > @@ -73,6 +73,105 @@ static int kvm_check_cpuid(struct kvm_vcpu
> > *vcpu)
> >  	return 0;
> >  }
> >  
> > +void kvm_osxsave_update_cpuid(struct kvm_vcpu *vcpu, bool set)
> > +{
> > +	struct kvm_cpuid_entry2 *best;
> 
> I vote that we opportunistically move away from the "best"
> terminology.  Either
> there's a matching entry or there's not.  Using "e" would probably
> shave a few
> lines of code.
> 
> > +
> > +	best = kvm_find_cpuid_entry(vcpu, 1, 0);
> > +
> > +	if (best && boot_cpu_has(X86_FEATURE_XSAVE)) {
> 
> Braces not needed. We should also check boot_cpu_has() first, it's
> constant
> time and maybe even in the cache, whereas finding CPUID entries is
> linear
> time and outright slow.
> 
> Actually, can you add a helper to handle this?  With that
> boot_cpu_has() check
> outside of the helper?  That'll allow the helper to be used for more
> features,
> and will force checking boot_cpu_has() first.  Hmm, and not all of
> the callers
> will need the boot_cpu_has() check, e.g. toggling PKE from
> kvm_set_cr4()
> doesn't need to be guarded because KVM disallows setting PKE if it's
> not
> supported by the host.

Do you mean because in kvm_set_cr4(), it has kvm_valid_cr4(vcpu, cr4)
check first?

Then how about the other 2 callers of kvm_pke_update_cpuid()?
enter_smm() -- I think it can omit the boot_cpu_has() check as well.
because it unconditionally cleared all CR4 bit before calls
kvm_set_cr4().
__set_sregs() -- looks like it doesn't validate host PKE status before
calling kvm_pke_update_cpuid(). Can I omit boot_cpu_has() as well?

So, I don't think kvm_pke_update_cpuid() can leverage the helper. Am I
right?

> 
> static inline void guest_cpuid_change(struct kvm_vcpu *vcpu, u32
> function,
> 				      u32 index, unsigned int feature,
> bool set)
> {
> 	struct kvm_cpuid_entry2 *e =  kvm_find_cpuid_entry(vcpu,
> function, index);
> 
> 	if (e)
> 		cpuid_entry_change(best, X86_FEATURE_OSXSAVE, set);
> }

Thanks Sean, I'm going to have this helper in v2 and have your signed-
off-by.
> 
> > +		cpuid_entry_change(best, X86_FEATURE_OSXSAVE, set);
> > +	}
> > +}
> > +
> > +void kvm_pke_update_cpuid(struct kvm_vcpu *vcpu, bool set)
> > +{
> > +	struct kvm_cpuid_entry2 *best;
> > +
> > +	best = kvm_find_cpuid_entry(vcpu, 7, 0);
> > +
> > +	if (best && boot_cpu_has(X86_FEATURE_PKU)) {
> > +		cpuid_entry_change(best, X86_FEATURE_OSPKE, set);
> > +	}
> > +}
> > +
> > +void kvm_xcr0_update_cpuid(struct kvm_vcpu *vcpu)
> > +{
> > +	struct kvm_cpuid_entry2 *best;a
> > +
> > +	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
> > +	if (!best) {
> > +		vcpu->arch.guest_supported_xcr0 = 0;
> > +	} else {
> > +		vcpu->arch.guest_supported_xcr0 =
> > +			(best->eax | ((u64)best->edx << 32)) &
> > supported_xcr0;
> > +		best->ebx = xstate_required_size(vcpu->arch.xcr0,
> > false);
> > +	}
> > +
> > +	best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
> > +	if (!best)
> > +		return;
> > +	if (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
> > +				cpuid_entry_has(best,
> > X86_FEATURE_XSAVEC))
> 
> Indentation should be aligned, e.g.
> 
> 	if (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
> 	    cpuid_entry_has(best, X86_FEATURE_XSAVEC))
> 
> 
> > +		best->ebx = xstate_required_size(vcpu->arch.xcr0,
> > true);
> > +}
> > +
> > +static void kvm_pv_unhalt_update_cpuid(struct kvm_vcpu *vcpu)
> > +{
> > +	struct kvm_cpuid_entry2 *best;
> > +
> > +	best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
> > +	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
> > +					(best->eax & (1 <<
> > KVM_FEATURE_PV_UNHALT)))
> > +		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
> > +}
> > +
> > +void kvm_mwait_update_cpuid(struct kvm_vcpu *vcpu, bool set)
> > +{
> > +	struct kvm_cpuid_entry2 *best;
> > +
> > +	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
> > +	if (best)
> > +		cpuid_entry_change(best, X86_FEATURE_MWAIT, set);
> > +}
> > +
> > +static void kvm_update_maxphyaddr(struct kvm_vcpu *vcpu)
> > +{
> > +
> > +	/* Note, maxphyaddr must be updated before tdp_level. */
> > +	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
> > +	vcpu->arch.tdp_level = kvm_x86_ops.get_tdp_level(vcpu);
> > +	kvm_mmu_reset_context(vcpu);
> > +}
> > +
> > +void kvm_apic_base_update_cpuid(struct kvm_vcpu *vcpu, bool set)
> > +{
> > +	struct kvm_cpuid_entry2 *best;
> > +
> > +	best = kvm_find_cpuid_entry(vcpu, 1, 0);
> > +	if (!best)
> > +		return;
> > +
> > +	cpuid_entry_change(best, X86_FEATURE_APIC, set);
> > +}
> > +
> > +static void kvm_update_lapic_timer_mode(struct kvm_vcpu *vcpu)
> > +{
> > +	struct kvm_cpuid_entry2 *best;
> > +	struct kvm_lapic *apic = vcpu->arch.apic;
> > +
> > +	best = kvm_find_cpuid_entry(vcpu, 1, 0);
> > +	if (!best)
> > +		return;
> > +
> > +	if (apic) {
> 
> Check apic before the lookup.
> 
> > +		if (cpuid_entry_has(best,
> > X86_FEATURE_TSC_DEADLINE_TIMER))
> > +			apic->lapic_timer.timer_mode_mask = 3 << 17;
> > +		else
> > +			apic->lapic_timer.timer_mode_mask = 1 << 17;
> > +	}
> > +}
> > +
> >  void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
> >  {

...
> > -- 
> > 1.8.3.1
> >
Sean Christopherson Oct. 20, 2020, 4:44 p.m. UTC | #4
On Tue, Oct 20, 2020 at 01:38:32PM +0800, Robert Hoo wrote:
> On Mon, 2020-09-28 at 21:56 -0700, Sean Christopherson wrote:
> > > +
> > > +	best = kvm_find_cpuid_entry(vcpu, 1, 0);
> > > +
> > > +	if (best && boot_cpu_has(X86_FEATURE_XSAVE)) {
> > 
> > Braces not needed. We should also check boot_cpu_has() first, it's constant
> > time and maybe even in the cache, whereas finding CPUID entries is linear
> > time and outright slow.
> > 
> > Actually, can you add a helper to handle this?  With that boot_cpu_has()
> > check outside of the helper?  That'll allow the helper to be used for more
> > features, and will force checking boot_cpu_has() first.  Hmm, and not all
> > of the callers will need the boot_cpu_has() check, e.g. toggling PKE from
> > kvm_set_cr4() doesn't need to be guarded because KVM disallows setting PKE
> > if it's not supported by the host.
> 
> Do you mean because in kvm_set_cr4(), it has kvm_valid_cr4(vcpu, cr4)
> check first?

Yes.

> Then how about the other 2 callers of kvm_pke_update_cpuid()?
> enter_smm() -- I think it can ommit boot_cpu_has() check as well.
> because it unconditionally cleared all CR4 bit before calls
> kvm_set_cr4().
> __set_sregs() -- looks like it doesn't valid host PKE status before
> call kvm_pke_update_cpuid(). Can I ommit boot_cpu_has() as well?
> 
> So, I don't think kvm_pke_update_cpuid() can leverage the helper. Am I
> right?

It can leverage the guest_cpuid_change() helper below, no?
 
> > static inline void guest_cpuid_change(struct kvm_vcpu *vcpu, u32
> > function,
> > 				      u32 index, unsigned int feature,
> > bool set)
> > {
> > 	struct kvm_cpuid_entry2 *e =  kvm_find_cpuid_entry(vcpu,
> > function, index);
> > 
> > 	if (e)
> > 		cpuid_entry_change(best, X86_FEATURE_OSXSAVE, set);
> > }
> 
> Thanks Sean, I'm going to have this helper in v2 and have your signed-
> off-by.

Eh, no need to give me credit, it's just a snippet in feedback.  It's
not even correct :-)  E.g. s/X86_FEATURE_OSXSAVE/feature below.

> > 
> > > +		cpuid_entry_change(best, X86_FEATURE_OSXSAVE, set);
> > > +	}
> > > +}
diff mbox series

Patch

diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 7d92854..efa7182 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -73,6 +73,105 @@  static int kvm_check_cpuid(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+void kvm_osxsave_update_cpuid(struct kvm_vcpu *vcpu, bool set)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 1, 0);
+
+	if (best && boot_cpu_has(X86_FEATURE_XSAVE)) {
+		cpuid_entry_change(best, X86_FEATURE_OSXSAVE, set);
+	}
+}
+
+void kvm_pke_update_cpuid(struct kvm_vcpu *vcpu, bool set)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 7, 0);
+
+	if (best && boot_cpu_has(X86_FEATURE_PKU)) {
+		cpuid_entry_change(best, X86_FEATURE_OSPKE, set);
+	}
+}
+
+void kvm_xcr0_update_cpuid(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
+	if (!best) {
+		vcpu->arch.guest_supported_xcr0 = 0;
+	} else {
+		vcpu->arch.guest_supported_xcr0 =
+			(best->eax | ((u64)best->edx << 32)) & supported_xcr0;
+		best->ebx = xstate_required_size(vcpu->arch.xcr0, false);
+	}
+
+	best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
+	if (!best)
+		return;
+	if (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
+				cpuid_entry_has(best, X86_FEATURE_XSAVEC))
+		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
+}
+
+static void kvm_pv_unhalt_update_cpuid(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
+	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
+					(best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
+		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
+}
+
+void kvm_mwait_update_cpuid(struct kvm_vcpu *vcpu, bool set)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
+	if (best)
+		cpuid_entry_change(best, X86_FEATURE_MWAIT, set);
+}
+
+static void kvm_update_maxphyaddr(struct kvm_vcpu *vcpu)
+{
+
+	/* Note, maxphyaddr must be updated before tdp_level. */
+	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+	vcpu->arch.tdp_level = kvm_x86_ops.get_tdp_level(vcpu);
+	kvm_mmu_reset_context(vcpu);
+}
+
+void kvm_apic_base_update_cpuid(struct kvm_vcpu *vcpu, bool set)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 1, 0);
+	if (!best)
+		return;
+
+	cpuid_entry_change(best, X86_FEATURE_APIC, set);
+}
+
+static void kvm_update_lapic_timer_mode(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+	struct kvm_lapic *apic = vcpu->arch.apic;
+
+	best = kvm_find_cpuid_entry(vcpu, 1, 0);
+	if (!best)
+		return;
+
+	if (apic) {
+		if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
+			apic->lapic_timer.timer_mode_mask = 3 << 17;
+		else
+			apic->lapic_timer.timer_mode_mask = 1 << 17;
+	}
+}
+
 void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 3a923ae..7eabb44 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -9,6 +9,12 @@ 
 extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
 void kvm_set_cpu_caps(void);
 
+void kvm_osxsave_update_cpuid(struct kvm_vcpu *vcpu, bool set);
+void kvm_pke_update_cpuid(struct kvm_vcpu *vcpu, bool set);
+void kvm_apic_base_update_cpuid(struct kvm_vcpu *vcpu, bool set);
+void kvm_mwait_update_cpuid(struct kvm_vcpu *vcpu, bool set);
+void kvm_xcr0_update_cpuid(struct kvm_vcpu *vcpu);
+
 void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
 					      u32 function, u32 index);