@@ -331,6 +331,15 @@ bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
ret = pmu->version > 1;
break;
+ case MSR_OFFCORE_RSP_0:
+ case MSR_OFFCORE_RSP_1:
+ /* At most 8-deep LBR for Core and Atom */
+ case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 7:
+ case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 7:
+ /* 16-deep LBR for Core i3/i5/i7 series processors */
+ case MSR_LBR_NHM_FROM ... MSR_LBR_NHM_FROM + 15:
+ case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 15:
+ return 1; /* report as supported, so guest accesses do not crash the guest */
default:
ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
|| get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
@@ -358,6 +367,16 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
*data = pmu->global_ovf_ctrl;
return 0;
+ case MSR_OFFCORE_RSP_0:
+ case MSR_OFFCORE_RSP_1:
+ /* At most 8-deep LBR for Core and Atom */
+ case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 7:
+ case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 7:
+ /* 16-deep LBR for Core i3/i5/i7 series processors */
+ case MSR_LBR_NHM_FROM ... MSR_LBR_NHM_FROM + 15:
+ case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 15:
+ *data = 0;
+ return 0;
default:
if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
(pmc = get_fixed_pmc(pmu, index))) {
@@ -409,6 +428,16 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 0;
}
break;
+ case MSR_OFFCORE_RSP_0:
+ case MSR_OFFCORE_RSP_1:
+ /* At most 8-deep LBR for Core and Atom */
+ case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 7:
+ case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 7:
+ /* 16-deep LBR for Core i3/i5/i7 series processors */
+ case MSR_LBR_NHM_FROM ... MSR_LBR_NHM_FROM + 15:
+ case MSR_LBR_NHM_TO ... MSR_LBR_NHM_TO + 15:
+ /* dummy handling for now: writes are silently ignored */
+ break;
default:
if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
(pmc = get_fixed_pmc(pmu, index))) {