@@ -1163,7 +1163,6 @@ struct kvm_x86_ops {
 	void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu,
 		enum exit_fastpath_completion *exit_fastpath);
-	bool (*xsaves_supported)(void);
 	bool (*umip_emulated)(void);
 	bool (*pt_supported)(void);
@@ -410,7 +410,6 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
 	unsigned f_gbpages = 0;
 	unsigned f_lm = 0;
 #endif
-	unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
 	unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
 
 	/* cpuid 1.edx */
@@ -467,7 +466,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
 
 	/* cpuid 0xD.1.eax */
 	const u32 kvm_cpuid_D_1_eax_x86_features =
-		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;
+		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES);
 
 	/* all calls to cpuid_count() should be made on the same cpu */
 	get_cpu();
@@ -629,6 +628,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
 				entry[i].eax &= kvm_cpuid_D_1_eax_x86_features;
 				cpuid_mask(&entry[i].eax, CPUID_D_1_EAX);
 				entry[i].ebx = 0;
+
+				kvm_x86_ops->set_supported_cpuid(&entry[i]);
+
 				if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
 					entry[i].ebx =
 						xstate_required_size(supported,
@@ -6054,6 +6054,9 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 
 static void svm_set_supported_cpuid(struct kvm_cpuid_entry2 *entry)
 {
+	if (entry->index)
+		return;
+
 	switch (entry->function) {
 	case 0x1:
 		if (avic)
@@ -6096,11 +6099,6 @@ static int svm_get_lpage_level(void)
 	return PT_PDPE_LEVEL;
 }
 
-static bool svm_xsaves_supported(void)
-{
-	return boot_cpu_has(X86_FEATURE_XSAVES);
-}
-
 static bool svm_umip_emulated(void)
 {
 	return false;
@@ -7471,7 +7469,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.cpuid_update = svm_cpuid_update,
-	.xsaves_supported = svm_xsaves_supported,
 	.umip_emulated = svm_umip_emulated,
 	.pt_supported = svm_pt_supported,
@@ -7131,6 +7131,14 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 
 static void vmx_set_supported_cpuid(struct kvm_cpuid_entry2 *entry)
 {
+	if (entry->index) {
+		if (WARN_ON_ONCE(entry->function != 0xd || entry->index != 1))
+			return;
+		if (!vmx_xsaves_supported())
+			cpuid_entry_clear(entry, X86_FEATURE_XSAVES);
+		return;
+	}
+
 	switch (entry->function) {
 	case 0x1:
 		if (nested)
@@ -7899,7 +7907,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.check_intercept = vmx_check_intercept,
 	.handle_exit_irqoff = vmx_handle_exit_irqoff,
-	.xsaves_supported = vmx_xsaves_supported,
 	.umip_emulated = vmx_umip_emulated,
 	.pt_supported = vmx_pt_supported,
Move the clearing of the XSAVES CPUID bit into VMX, which has a separate
VMCS control to enable XSAVES in non-root, to eliminate an instance of
the undesirable "unsigned f_* = *_supported ? F(*) : 0" pattern in the
common CPUID handling code.

Add a call to ->set_supported_cpuid() in the leaf 0xD handling so that
vendor code can update feature bits in the index=1 sub-leaf (which
contains the XSAVES bit) and teach {svm,vmx}_set_supported_cpuid() how
to handle non-zero indices.

Drop ->xsaves_supported() since CPUID adjustment was the only user.

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/include/asm/kvm_host.h | 1 -
 arch/x86/kvm/cpuid.c            | 6 ++++--
 arch/x86/kvm/svm.c              | 9 +++------
 arch/x86/kvm/vmx/vmx.c          | 9 ++++++++-
 4 files changed, 15 insertions(+), 10 deletions(-)
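
For readers who want the dispatch pattern in isolation, below is a minimal,
self-contained C model of the new flow. It is not kernel code: the struct
layout, the F_XSAVES constant, the x86_ops type, and the main() harness are
illustrative assumptions; only the shape of vmx_set_supported_cpuid() mirrors
the patch. Common code builds a CPUID.0xD sub-leaf entry and then hands it to
the vendor hook, which hides XSAVES when it cannot be virtualized.

/*
 * Standalone sketch (NOT kernel code) of the ->set_supported_cpuid()
 * dispatch introduced above.  All names below are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define F_XSAVES (1u << 3)	/* CPUID.0xD.1:EAX bit 3 advertises XSAVES */

struct cpuid_entry {
	uint32_t function, index;
	uint32_t eax, ebx, ecx, edx;
};

struct x86_ops {
	void (*set_supported_cpuid)(struct cpuid_entry *entry);
};

/* Stand-in for probing the dedicated VMCS execution control. */
static bool vmx_xsaves_supported(void)
{
	return false;	/* pretend the control is absent on this CPU */
}

static void vmx_set_supported_cpuid(struct cpuid_entry *entry)
{
	if (entry->index) {
		/* Only leaf 0xD, sub-leaf 1 arrives with a non-zero index. */
		if (entry->function != 0xd || entry->index != 1)
			return;
		if (!vmx_xsaves_supported())
			entry->eax &= ~F_XSAVES;	/* hide XSAVES */
		return;
	}
	/* index == 0 leaves would be handled by the switch in the real code. */
}

static struct x86_ops ops = {
	.set_supported_cpuid = vmx_set_supported_cpuid,
};

int main(void)
{
	struct cpuid_entry e = { .function = 0xd, .index = 1, .eax = F_XSAVES };

	/* Common code masks eax first, then lets the vendor adjust it. */
	ops.set_supported_cpuid(&e);
	printf("XSAVES %s\n", (e.eax & F_XSAVES) ? "advertised" : "hidden");
	return 0;
}

Note the call ordering in the cpuid.c hunk: ->set_supported_cpuid() runs
before the F(XSAVES)|F(XSAVEC) check, so if the vendor hook clears XSAVES
(and XSAVEC is also clear), the xstate_required_size() computation is skipped
and entry[i].ebx stays zero, keeping the reported save-area size consistent
with the advertised features.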