@@ -1663,6 +1663,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	u32 index;
 
 	switch (msr_info->index) {
+	case MSR_TEST_CTL:
+		if (!vmx->msr_test_ctl_mask)
+			return 1;
+		msr_info->data = vmx->msr_test_ctl;
+		break;
 #ifdef CONFIG_X86_64
 	case MSR_FS_BASE:
 		msr_info->data = vmcs_readl(GUEST_FS_BASE);
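Returning 1 from vmx_get_msr() here makes the guest's RDMSR take #GP, so a guest whose core capability leaves msr_test_ctl_mask at zero cannot even read the MSR. As an illustration only (not part of the patch), a guest-side probe through the Linux msr driver would see that #GP surface as EIO; the 0x33 index is the architectural TEST_CTL index, and everything else below is a hypothetical user-space sketch:

/* Hypothetical guest-side probe; needs CONFIG_X86_MSR and root. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_TEST_CTL 0x33	/* architectural TEST_CTL index */

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	/* The msr driver uses the file offset as the MSR index; a #GP
	 * injected by vmx_get_msr() surfaces here as EIO. */
	if (pread(fd, &val, sizeof(val), MSR_TEST_CTL) != sizeof(val))
		perror("rdmsr TEST_CTL (expected when not exposed)");
	else
		printf("TEST_CTL = %#llx\n", (unsigned long long)val);
	close(fd);
	return 0;
}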
@@ -1797,6 +1802,12 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	u32 index;
 
 	switch (msr_index) {
+	case MSR_TEST_CTL:
+		if (!vmx->msr_test_ctl_mask ||
+		    (data & vmx->msr_test_ctl_mask) != data)
+			return 1;
+		vmx->msr_test_ctl = data;
+		break;
 	case MSR_EFER:
 		ret = kvm_set_msr_common(vcpu, msr_info);
 		break;
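The acceptance test above reads tersely but does two jobs: a zero mask rejects every write (the MSR is not exposed at all), and (data & mask) != data rejects any write that sets a bit outside what the guest was offered, turning the WRMSR into #GP. A self-contained restatement of the predicate, assuming only the series' TEST_CTL bit-29 definition; bit 31 stands in for an arbitrary unsupported bit:

#include <stdint.h>
#include <stdio.h>

#define TEST_CTL_SPLIT_LOCK_DETECT	(1ULL << 29)

/* Mirrors the vmx_set_msr() check: every set bit must fall inside the
 * per-vCPU mask, and an empty mask means "MSR not exposed". */
static int test_ctl_write_ok(uint64_t mask, uint64_t data)
{
	return mask && (data & mask) == data;
}

int main(void)
{
	uint64_t mask = TEST_CTL_SPLIT_LOCK_DETECT;

	printf("%d\n", test_ctl_write_ok(mask, 0));          /* 1: clearing is allowed */
	printf("%d\n", test_ctl_write_ok(mask, 1ULL << 29)); /* 1: advertised bit */
	printf("%d\n", test_ctl_write_ok(mask, 1ULL << 31)); /* 0: not advertised -> #GP */
	printf("%d\n", test_ctl_write_ok(0, 1ULL << 29));    /* 0: MSR not exposed */
	return 0;
}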
@@ -4106,6 +4117,16 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	}
 }
 
+static u64 vmx_get_msr_test_ctl_mask(struct kvm_vcpu *vcpu)
+{
+	u64 mask = 0;
+
+	if (vcpu->arch.core_capability & CORE_CAP_SPLIT_LOCK_DETECT)
+		mask |= TEST_CTL_SPLIT_LOCK_DETECT;
+
+	return mask;
+}
+
 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
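Two distinct bit positions are being bridged by vmx_get_msr_test_ctl_mask(): CORE_CAP_SPLIT_LOCK_DETECT is the enumeration bit in MSR_IA32_CORE_CAPABILITY (bit 5), while TEST_CTL_SPLIT_LOCK_DETECT is the control bit in MSR_TEST_CTL (bit 29). The helper translates what the guest was told it has into which control bits it may set. A small user-space restatement of that mapping, under those same bit definitions:

#include <stdint.h>
#include <stdio.h>

#define CORE_CAP_SPLIT_LOCK_DETECT	(1ULL << 5)	/* MSR_IA32_CORE_CAPABILITY */
#define TEST_CTL_SPLIT_LOCK_DETECT	(1ULL << 29)	/* MSR_TEST_CTL */

/* Same shape as vmx_get_msr_test_ctl_mask(): each capability bit the
 * guest was given unlocks the corresponding TEST_CTL control bit. */
static uint64_t test_ctl_mask_from_core_cap(uint64_t core_cap)
{
	uint64_t mask = 0;

	if (core_cap & CORE_CAP_SPLIT_LOCK_DETECT)
		mask |= TEST_CTL_SPLIT_LOCK_DETECT;
	return mask;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)
	       test_ctl_mask_from_core_cap(CORE_CAP_SPLIT_LOCK_DETECT));
	return 0;
}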
@@ -4114,6 +4135,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	vmx->rmode.vm86_active = 0;
 	vmx->spec_ctrl = 0;
+	vmx->msr_test_ctl = 0;
+	vmx->msr_test_ctl_mask = vmx_get_msr_test_ctl_mask(vcpu);
 
 	vcpu->arch.microcode_version = 0x100000000ULL;
 	vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
@@ -6313,6 +6336,23 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 					msrs[i].host, false);
 }
 
+static void atomic_switch_msr_test_ctl(struct vcpu_vmx *vmx)
+{
+	u64 host_msr_test_ctl;
+
+	if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
+		return;
+
+	host_msr_test_ctl = this_cpu_read(msr_test_ctl_cache);
+
+	if (host_msr_test_ctl == vmx->msr_test_ctl) {
+		clear_atomic_switch_msr(vmx, MSR_TEST_CTL);
+	} else {
+		add_atomic_switch_msr(vmx, MSR_TEST_CTL, vmx->msr_test_ctl,
+				      host_msr_test_ctl, false);
+	}
+}
+
 static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val)
 {
 	vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val);
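atomic_switch_msr_test_ctl() leans on the VMX MSR autoload lists: when the guest value matches the host's per-CPU cached TEST_CTL, the MSR is dropped from the lists entirely, so the common case (host and guest agree) adds no VM-entry/exit cost. A toy, single-slot model of that policy, with hypothetical stand-ins for the real clear_atomic_switch_msr()/add_atomic_switch_msr() helpers:

#include <stdint.h>
#include <stdio.h>

#define MSR_TEST_CTL 0x33

/* Toy single-slot stand-in for the VM-entry/VM-exit autoload lists. */
struct autoload {
	int      in_use;
	uint32_t msr;
	uint64_t guest, host;
};

static void clear_switch_msr(struct autoload *al)
{
	al->in_use = 0;	/* hardware keeps the current value across entry/exit */
}

static void add_switch_msr(struct autoload *al, uint32_t msr,
			   uint64_t guest, uint64_t host)
{
	*al = (struct autoload){ 1, msr, guest, host };
}

/* Mirrors atomic_switch_msr_test_ctl(): only pay for the load/restore
 * when guest and host actually disagree. */
static void switch_test_ctl(struct autoload *al, uint64_t guest, uint64_t host)
{
	if (guest == host)
		clear_switch_msr(al);
	else
		add_switch_msr(al, MSR_TEST_CTL, guest, host);
}

int main(void)
{
	struct autoload al = { 0 };

	switch_test_ctl(&al, 0, 0);		/* equal: list stays empty */
	printf("in_use=%d\n", al.in_use);
	switch_test_ctl(&al, 1ULL << 29, 0);	/* differ: hardware switches it */
	printf("in_use=%d\n", al.in_use);
	return 0;
}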
@@ -6421,6 +6461,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	atomic_switch_perf_msrs(vmx);
 
+	atomic_switch_msr_test_ctl(vmx);
+
 	vmx_update_hv_timer(vcpu);
 
 	/*
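For a sense of what the switched-in value buys the guest: with TEST_CTL.SPLIT_LOCK_DETECT loaded at VM entry, a LOCK-prefixed read-modify-write that straddles a cache-line boundary raises #AC in the guest instead of quietly locking the bus. A user-space demo (illustrative, not from the patch) that provokes exactly that access pattern; with detection off it just prints and exits:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 64-byte-aligned buffer; offset 62 makes the 4-byte operand
	 * straddle the boundary between the first two cache lines. */
	_Alignas(64) static uint8_t buf[128];
	uint32_t *p = (uint32_t *)(buf + 62);

	/* LOCK-prefixed read-modify-write across a cache line: a split
	 * lock. With TEST_CTL bit 29 set this takes #AC, and the guest
	 * kernel decides what happens to the offender. */
	__sync_fetch_and_add(p, 1);

	printf("split-locked add survived, value %u\n", *p);
	return 0;
}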
@@ -190,6 +190,8 @@ struct vcpu_vmx {
 	u64 msr_guest_kernel_gs_base;
 #endif
 
+	u64 msr_test_ctl;
+	u64 msr_test_ctl_mask;
 	u64 spec_ctrl;
 
 	u32 vm_entry_controls_shadow;
@@ -1231,7 +1231,24 @@ EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities);
 static u64 kvm_get_core_capability(void)
 {
-	return 0;
+	u64 data = 0;
+
+	if (boot_cpu_has(X86_FEATURE_CORE_CAPABILITY)) {
+		rdmsrl(MSR_IA32_CORE_CAPABILITY, data);
+
+		/* Mask out the features KVM cannot virtualize. */
+		data &= CORE_CAP_SPLIT_LOCK_DETECT;
+	} else if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
+		/*
+		 * Some models (matched by FMS) support split lock
+		 * detection without enumerating MSR_IA32_CORE_CAPABILITY.
+		 * Set CORE_CAP_SPLIT_LOCK_DETECT for them anyway, since
+		 * KVM emulates MSR_IA32_CORE_CAPABILITY for the guest.
+		 */
+		data |= CORE_CAP_SPLIT_LOCK_DETECT;
+	}
+
+	return data;
 }
 
 static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
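Because kvm_get_core_capability() feeds KVM's MSR-feature reporting, userspace can observe the synthesized value before any vCPU exists, via the system-scoped KVM_GET_MSRS ioctl (present when KVM_CAP_GET_MSR_FEATURES is advertised). A hedged sketch; the 0xcf index matches the kernel's MSR_IA32_CORE_CAPABILITY definition:

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define MSR_IA32_CORE_CAPABILITY 0xcf

int main(void)
{
	/* kvm_msrs ends in a flexible array, so stack a one-entry buffer. */
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} q = {
		.hdr.nmsrs   = 1,
		.entry.index = MSR_IA32_CORE_CAPABILITY,
	};
	int kvm = open("/dev/kvm", O_RDWR);

	/* On the system fd, KVM_GET_MSRS reports feature MSRs, i.e. the
	 * value kvm_get_core_capability() computed above; it returns the
	 * number of entries filled in. */
	if (kvm < 0 || ioctl(kvm, KVM_GET_MSRS, &q) != 1) {
		perror("KVM_GET_MSRS");
		return 1;
	}
	printf("CORE_CAPABILITY = %#llx\n", (unsigned long long)q.entry.data);
	close(kvm);
	return 0;
}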