@@ -82,11 +82,22 @@ static inline void guest_cpuid_change(struct kvm_vcpu *vcpu, u32 function,
cpuid_entry_change(e, feature, set);
}
+void kvm_osxsave_update_cpuid(struct kvm_vcpu *vcpu, bool set)
+{
+ if (boot_cpu_has(X86_FEATURE_XSAVE))
+ guest_cpuid_change(vcpu, 1, 0, X86_FEATURE_OSXSAVE, set);
+}
+
void kvm_apic_base_update_cpuid(struct kvm_vcpu *vcpu, bool set)
{
guest_cpuid_change(vcpu, 1, 0, X86_FEATURE_APIC, set);
}
+void kvm_pke_update_cpuid(struct kvm_vcpu *vcpu, bool set)
+{
+ if (boot_cpu_has(X86_FEATURE_PKU))
+ guest_cpuid_change(vcpu, 7, 0, X86_FEATURE_OSPKE, set);
+}
void kvm_xcr0_update_cpuid(struct kvm_vcpu *vcpu)
{
@@ -9,6 +9,8 @@
extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
void kvm_set_cpu_caps(void);
+void kvm_osxsave_update_cpuid(struct kvm_vcpu *vcpu, bool set);
+void kvm_pke_update_cpuid(struct kvm_vcpu *vcpu, bool set);
void kvm_apic_base_update_cpuid(struct kvm_vcpu *vcpu, bool set);
void kvm_xcr0_update_cpuid(struct kvm_vcpu *vcpu);
@@ -1008,8 +1008,10 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
(!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
kvm_mmu_reset_context(vcpu);
- if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
- kvm_update_cpuid_runtime(vcpu);
+ if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
+ kvm_osxsave_update_cpuid(vcpu, !!(cr4 & X86_CR4_OSXSAVE));
+ if ((cr4 ^ old_cr4) & X86_CR4_PKE)
+ kvm_pke_update_cpuid(vcpu, !!(cr4 & X86_CR4_PKE));
return 0;
}
@@ -8177,6 +8179,8 @@ static void enter_smm(struct kvm_vcpu *vcpu)
vcpu->arch.cr0 = cr0;
kvm_x86_ops.set_cr4(vcpu, 0);
+ kvm_osxsave_update_cpuid(vcpu, false);
+ kvm_pke_update_cpuid(vcpu, false);
/* Undocumented: IDT limit is set to zero on entry to SMM. */
dt.address = dt.size = 0;
@@ -8214,7 +8218,6 @@ static void enter_smm(struct kvm_vcpu *vcpu)
kvm_x86_ops.set_efer(vcpu, 0);
#endif
- kvm_update_cpuid_runtime(vcpu);
kvm_mmu_reset_context(vcpu);
}
@@ -9193,7 +9196,7 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
struct msr_data apic_base_msr;
int mmu_reset_needed = 0;
- int cpuid_update_needed = 0;
+ ulong old_cr4 = 0;
int pending_vec, max_bits, idx;
struct desc_ptr dt;
int ret = -EINVAL;
@@ -9227,12 +9230,15 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
kvm_x86_ops.set_cr0(vcpu, sregs->cr0);
vcpu->arch.cr0 = sregs->cr0;
- mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
- cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) &
- (X86_CR4_OSXSAVE | X86_CR4_PKE));
+ old_cr4 = kvm_read_cr4(vcpu);
+ mmu_reset_needed |= old_cr4 != sregs->cr4;
+
kvm_x86_ops.set_cr4(vcpu, sregs->cr4);
- if (cpuid_update_needed)
- kvm_update_cpuid_runtime(vcpu);
+
+ if ((old_cr4 ^ sregs->cr4) & X86_CR4_OSXSAVE)
+ kvm_osxsave_update_cpuid(vcpu, !!(sregs->cr4 & X86_CR4_OSXSAVE));
+ if ((old_cr4 ^ sregs->cr4) & X86_CR4_PKE)
+ kvm_pke_update_cpuid(vcpu, !!(sregs->cr4 & X86_CR4_PKE));
idx = srcu_read_lock(&vcpu->kvm->srcu);
if (is_pae_paging(vcpu)) {
And substitute kvm_update_cpuid_runtime() invocations in kvm_set_cr4(),
enter_smm() and __set_sregs() with them accordingly.

Signed-off-by: Robert Hoo <robert.hu@linux.intel.com>
---
 arch/x86/kvm/cpuid.c | 11 +++++++++++
 arch/x86/kvm/cpuid.h |  2 ++
 arch/x86/kvm/x86.c   | 24 +++++++++++++++---------
 3 files changed, 28 insertions(+), 9 deletions(-)