--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -355,6 +355,9 @@ static u32 cpuid_get_reg_unsafe(struct kvm_cpuid_entry2 *entry, u32 reg)
}
}
+static int cpuid_func_emulated(struct kvm_cpuid_entry2 *entry, u32 func,
+ bool include_partially_emulated);
+
void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
@@ -373,6 +376,7 @@ void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
*/
for (i = 0; i < NR_KVM_CPU_CAPS; i++) {
const struct cpuid_reg cpuid = reverse_cpuid[i];
+ struct kvm_cpuid_entry2 emulated;
if (!cpuid.function)
continue;
@@ -381,7 +385,16 @@ void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
if (!entry)
continue;
- vcpu->arch.cpu_caps[i] = cpuid_get_reg_unsafe(entry, cpuid.reg);
+ cpuid_func_emulated(&emulated, cpuid.function, true);
+
+ /*
+ * A vCPU has a feature if it's supported by KVM and is enabled
+ * in guest CPUID. Note, this includes features that are
+ * supported by KVM but aren't advertised to userspace!
+ */
+ vcpu->arch.cpu_caps[i] = kvm_cpu_caps[i] |
+ cpuid_get_reg_unsafe(&emulated, cpuid.reg);
+ vcpu->arch.cpu_caps[i] &= cpuid_get_reg_unsafe(entry, cpuid.reg);
}
kvm_update_cpuid_runtime(vcpu);
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -263,13 +263,6 @@ static __always_inline void guest_cpu_cap_change(struct kvm_vcpu *vcpu,
guest_cpu_cap_clear(vcpu, x86_feature);
}
-static __always_inline void guest_cpu_cap_constrain(struct kvm_vcpu *vcpu,
- unsigned int x86_feature)
-{
- if (!kvm_cpu_cap_has(x86_feature))
- guest_cpu_cap_clear(vcpu, x86_feature);
-}
-
static __always_inline bool guest_cpu_cap_has(struct kvm_vcpu *vcpu,
unsigned int x86_feature)
{
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4406,10 +4406,6 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
boot_cpu_has(X86_FEATURE_XSAVES) &&
guest_cpuid_has(vcpu, X86_FEATURE_XSAVE));
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_NRIPS);
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_TSCRATEMSR);
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_LBRV);
-
/*
* Intercept VMLOAD if the vCPU model is Intel in order to emulate that
* VMLOAD drops bits 63:32 of SYSENTER (ignoring the fact that exposing
@@ -4417,13 +4413,6 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
*/
if (guest_cpuid_is_intel_compatible(vcpu))
guest_cpu_cap_clear(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
- else
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
-
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_PAUSEFILTER);
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_PFTHRESHOLD);
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_VGIF);
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_VNMI);
svm_recalc_instruction_intercepts(vcpu, svm);
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7828,15 +7828,10 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
* to the guest. XSAVES depends on CR4.OSXSAVE, and CR4.OSXSAVE can be
* set if and only if XSAVE is supported.
*/
- if (boot_cpu_has(X86_FEATURE_XSAVE) &&
- guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_XSAVES);
- else
+ if (!boot_cpu_has(X86_FEATURE_XSAVE) ||
+ !guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
guest_cpu_cap_clear(vcpu, X86_FEATURE_XSAVES);
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_VMX);
- guest_cpu_cap_constrain(vcpu, X86_FEATURE_LAM);
-
vmx_setup_uret_msrs(vmx);
if (cpu_has_secondary_exec_ctrls())
Constrain all guest cpu_caps based on KVM support instead of constraining
only the few features that KVM _currently_ needs to verify are actually
supported by KVM.  The intent of cpu_caps is to track what the guest is
actually capable of using, not the raw, unfiltered CPUID values that the
guest sees.  I.e. KVM should always consult its own support when making
decisions based on guest CPUID; the only reason KVM has historically made
the checks opt-in was the lack of centralized tracking.

Suggested-by: Maxim Levitsky <mlevitsk@redhat.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/cpuid.c   | 15 ++++++++++++++-
 arch/x86/kvm/cpuid.h   |  7 -------
 arch/x86/kvm/svm/svm.c | 11 -----------
 arch/x86/kvm/vmx/vmx.c |  9 ++-------
 4 files changed, 16 insertions(+), 26 deletions(-)
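
For readers following along outside the kernel tree, the net effect of the
cpuid.c hunk can be modeled in a few lines.  The sketch below is a standalone,
simplified illustration, not kernel code: the FEAT_* bits and the kvm_caps /
emulated_caps variables are made-up stand-ins for kvm_cpu_caps[i] and the
emulated CPUID bits, and the helpers only loosely mirror guest_cpu_cap_has()
and the cpu_caps computation.  It shows why the per-feature
guest_cpu_cap_constrain() calls removed from svm.c and vmx.c are no longer
needed: every cap is pre-masked by KVM support when cpu_caps is initialized.

	/*
	 * Standalone, simplified model of the new guest cpu_caps
	 * initialization.  All names are illustrative stand-ins, not the
	 * kernel's actual identifiers or implementations.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define FEAT_XSAVES (1u << 0)	/* example feature bits for one CPUID register */
	#define FEAT_LBRV   (1u << 1)
	#define FEAT_VGIF   (1u << 2)

	/* Features KVM supports natively (analogue of kvm_cpu_caps[i]). */
	static const uint32_t kvm_caps = FEAT_XSAVES | FEAT_VGIF;

	/* Features KVM can emulate regardless of hardware support. */
	static const uint32_t emulated_caps = FEAT_LBRV;

	/* Effective vCPU capabilities: KVM support masked by guest CPUID. */
	static uint32_t guest_cpu_caps(uint32_t guest_cpuid_reg)
	{
		return (kvm_caps | emulated_caps) & guest_cpuid_reg;
	}

	/* With caps pre-masked, a plain bit test replaces the old constrain step. */
	static bool guest_cpu_cap_has(uint32_t caps, uint32_t feature)
	{
		return caps & feature;
	}

	int main(void)
	{
		/* Userspace enabled XSAVES and VGIF in guest CPUID, but not LBRV. */
		uint32_t caps = guest_cpu_caps(FEAT_XSAVES | FEAT_VGIF);

		printf("XSAVES: %d\n", guest_cpu_cap_has(caps, FEAT_XSAVES)); /* 1 */
		printf("LBRV:   %d\n", guest_cpu_cap_has(caps, FEAT_LBRV));   /* 0: not in guest CPUID */
		printf("VGIF:   %d\n", guest_cpu_cap_has(caps, FEAT_VGIF));   /* 1 */
		return 0;
	}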