
[V6,2/2] kvm: ignore LBR and extra rsp

Message ID 1405365957-20202-2-git-send-email-kan.liang@intel.com (mailing list archive)

Commit Message

kan.liang@intel.com July 14, 2014, 7:25 p.m. UTC
From: Kan Liang <kan.liang@intel.com>

With -cpu host, KVM reports LBR and extra_regs support, so the guest perf
driver may access the LBR and extra_regs MSRs. However, there is no LBR or
extra_regs virtualization support yet, and such accesses can crash the guest.
As a workaround, KVM simply ignores the LBR and extra_regs MSRs, lying to the
guest that the accesses succeeded.
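
For background, the guest-side crash path looks roughly like the sketch
below. This is a minimal illustration, not the actual perf driver code:
wrmsrl() and MSR_LBR_SELECT are the real kernel helper and MSR constant,
while enable_lbr_filtering() is a made-up name. Without this patch, KVM
fails the unknown MSR write and injects a #GP; with CONFIG_PARAVIRT=n the
guest issues a raw wrmsr with no fixup path, so the fault is fatal.

  #include <linux/types.h>    /* u64 */
  #include <asm/msr.h>        /* wrmsrl() */
  #include <asm/msr-index.h>  /* MSR_LBR_SELECT */

  /* hypothetical helper standing in for the guest perf driver */
  static void enable_lbr_filtering(u64 lbr_select)
  {
          /* traps to KVM; pre-patch the write fails -> guest #GP */
          wrmsrl(MSR_LBR_SELECT, lbr_select);
  }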

To reproduce the issue, build the host kernel with CONFIG_KVM_INTEL=y, and
the guest kernel with CONFIG_PARAVIRT=n and CONFIG_KVM_GUEST=n.
Start the guest with -cpu host.
Run perf record with --branch-any or --branch-filter in the guest to trigger
the LBR #GP.
Run perf stat on offcore events (e.g. LLC-loads/LLC-load-misses ...) in the
guest to trigger the offcore_rsp #GP. Example commands follow below.
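
Concretely, the steps look something like the commands below. These are
illustrative only: the qemu invocation is trimmed to the relevant flags, and
the exact cache event names vary by machine.

  # host: expose the host CPUID, including the LBR/offcore bits, to the guest
  qemu-system-x86_64 -enable-kvm -cpu host ...

  # guest: branch-stack sampling programs the LBR MSRs
  perf record --branch-any -- sleep 1

  # guest: offcore events program MSR_OFFCORE_RSP_0/1
  perf stat -e LLC-loads,LLC-load-misses -- sleep 1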

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@intel.com>

---
V3: add MSR_LBR_TOS
V4: add MSR_LBR_SELECT and MSR_PEBS_LD_LAT_THRESHOLD
V5: set_msr should return 0 to lie to the guest

 arch/x86/kvm/pmu.c | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

Patch

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index cbecaa9..5fd5b44 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -331,6 +331,18 @@  bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
 		ret = pmu->version > 1;
 		break;
+	case MSR_OFFCORE_RSP_0:
+	case MSR_OFFCORE_RSP_1:
+	case MSR_LBR_SELECT:
+	case MSR_PEBS_LD_LAT_THRESHOLD:
+	case MSR_LBR_TOS:
+	/* At most 8-deep LBR for core and atom */
+	case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 7:
+	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 7:
+	/* 16-deep LBR for core i3/i5/i7 series processors */
+	case MSR_LBR_NHM_FROM ... MSR_LBR_NHM_FROM + 15:
+	case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 15:
+		return 1; /* to avoid crashes */
 	default:
 		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
 			|| get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
@@ -358,6 +370,19 @@  int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
 	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
 		*data = pmu->global_ovf_ctrl;
 		return 0;
+	case MSR_OFFCORE_RSP_0:
+	case MSR_OFFCORE_RSP_1:
+	case MSR_LBR_SELECT:
+	case MSR_PEBS_LD_LAT_THRESHOLD:
+	case MSR_LBR_TOS:
+	/* At most 8-deep LBR for core and atom */
+	case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 7:
+	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 7:
+	/* 16-deep LBR for core i3/i5/i7 series processors */
+	case MSR_LBR_NHM_FROM ... MSR_LBR_NHM_FROM + 15:
+	case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 15:
+		*data = 0;
+		return 0;
 	default:
 		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
 				(pmc = get_fixed_pmc(pmu, index))) {
@@ -409,6 +434,19 @@  int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 0;
 		}
 		break;
+	case MSR_OFFCORE_RSP_0:
+	case MSR_OFFCORE_RSP_1:
+	case MSR_LBR_SELECT:
+	case MSR_PEBS_LD_LAT_THRESHOLD:
+	case MSR_LBR_TOS:
+	/* At most 8-deep LBR for core and atom */
+	case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 7:
+	case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 7:
+	/* 16-deep LBR for core i3/i5/i7 series processors */
+	case MSR_LBR_NHM_FROM ... MSR_LBR_NHM_FROM + 15:
+	case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 15:
+		/* dummy for now */
+		return 0;
 	default:
 		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
 				(pmc = get_fixed_pmc(pmu, index))) {