@@ -2009,7 +2009,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
if (cpu_has_spec_ctrl_shadow())
- msr_info->data = vmcs_read64(IA32_SPEC_CTRL_SHADOW);
+ msr_info->data = to_vmx(vcpu)->spec_ctrl_shadow;
else
msr_info->data = to_vmx(vcpu)->spec_ctrl;
break;
@@ -2158,6 +2158,7 @@ static void vmx_set_spec_ctrl(struct kvm_vcpu *vcpu, u64 val)
vmx->spec_ctrl = val;
if (cpu_has_spec_ctrl_shadow()) {
+ vmx->spec_ctrl_shadow = val;
vmcs_write64(IA32_SPEC_CTRL_SHADOW, val);
vmx->spec_ctrl |= vcpu->kvm->arch.force_spec_ctrl_value;
@@ -4803,6 +4804,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
if (cpu_has_spec_ctrl_shadow()) {
+ vmx->spec_ctrl_shadow = 0;
vmcs_write64(IA32_SPEC_CTRL_SHADOW, 0);
/*
@@ -7246,12 +7248,14 @@ void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
return;
if (flags & VMX_RUN_SAVE_SPEC_CTRL) {
- if (cpu_has_spec_ctrl_shadow())
- vmx->spec_ctrl = (vmcs_read64(IA32_SPEC_CTRL_SHADOW) &
+ if (cpu_has_spec_ctrl_shadow()) {
+ vmx->spec_ctrl_shadow = vmcs_read64(IA32_SPEC_CTRL_SHADOW);
+ vmx->spec_ctrl = (vmx->spec_ctrl_shadow &
~vmx->vcpu.kvm->arch.force_spec_ctrl_mask) |
vmx->vcpu.kvm->arch.force_spec_ctrl_value;
- else
+ } else {
vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL);
+ }
}
/*
@@ -281,6 +281,12 @@ struct vcpu_vmx {
#endif
u64 spec_ctrl;
+ /*
+ * Cache of the IA32_SPEC_CTRL_SHADOW field in the VMCS, i.e., the
+ * value of MSR_IA32_SPEC_CTRL from the guest's point of view.
+ */
+ u64 spec_ctrl_shadow;
+
u32 msr_ia32_umwait_control;
/*
This field is effectively the value of the IA32_SPEC_CTRL MSR from the
guest's point of view. Cache it for nested VMX transitions: the value
should be propagated between vmcs01 and vmcs02 so that, across nested
VMX transitions, the IA32_SPEC_CTRL MSR does not appear to change
magically from the guest's perspective.

The IA32_SPEC_CTRL_SHADOW field may be changed by the guest whenever the
IA32_SPEC_CTRL MSR is passed through to the guest. So, update the cache
right after VM-exit to ensure it always stays consistent with the
guest-visible value. As a bonus, vmx_get_msr() can return the cached
value directly, with no need for a VMREAD.

No functional change intended.

Signed-off-by: Chao Gao <chao.gao@intel.com>
---
 arch/x86/kvm/vmx/vmx.c | 12 ++++++++----
 arch/x86/kvm/vmx/vmx.h |  6 ++++++
 2 files changed, 14 insertions(+), 4 deletions(-)
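Illustrative sketch (not part of this patch): one way the cached value
could be propagated into the freshly loaded VMCS on a nested transition,
reusing cpu_has_spec_ctrl_shadow() and vmcs_write64() from the hunks
above. The helper name below is hypothetical.

/* Hypothetical helper, for illustration only. */
static void vmx_propagate_spec_ctrl_shadow(struct vcpu_vmx *vmx)
{
	/*
	 * Write the guest-visible IA32_SPEC_CTRL value cached at the last
	 * VM-exit into the current VMCS so the guest observes the same MSR
	 * value before and after the nested VMX transition.
	 */
	if (cpu_has_spec_ctrl_shadow())
		vmcs_write64(IA32_SPEC_CTRL_SHADOW, vmx->spec_ctrl_shadow);
}

The opposite direction, refreshing the cache from the VMCS, is done right
after VM-exit in the vmx_spec_ctrl_restore_host() hunk above.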