--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3748,6 +3748,18 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
 		*nr = 3;
 	}
+	if (arr[1].guest && x86_pmu.intel_cap.pebs_baseline) {
+		arr[3].msr = MSR_PEBS_DATA_CFG;
+		arr[3].host = cpuc->pebs_data_cfg;
+		/* KVM will update MSR_PEBS_DATA_CFG with the trapped guest value. */
+		arr[3].guest = 0ull;
+		*nr = 4;
+	} else if (*nr == 3) {
+		arr[3].msr = MSR_PEBS_DATA_CFG;
+		arr[3].host = arr[3].guest = 0;
+		*nr = 4;
+	}
+
 	return arr;
 }
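
For context, each slot that intel_guest_get_msrs() fills is a {msr, host, guest} triple that KVM switches across VM-entry/VM-exit. Below is a minimal sketch of the consumer's view, assuming (as in the earlier patches of this series) that arr[0] is MSR_CORE_PERF_GLOBAL_CTRL, arr[1] is MSR_IA32_PEBS_ENABLE and arr[2] is MSR_IA32_DS_AREA; dump_switch_msrs() is illustrative only:

/* Field layout from arch/x86/include/asm/perf_event.h. */
struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

/* Illustrative only: with this hunk applied and PEBS baseline active,
 * nr is 4 and arr[3] carries MSR_PEBS_DATA_CFG with .guest == 0; the
 * real guest value is patched in by KVM (see the vmx.c hunk below).
 * The else-branch keeps nr at 4 even without baseline, so consumers
 * can index msrs[3] whenever msrs[2] exists. */
static void dump_switch_msrs(struct perf_guest_switch_msr *arr, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		pr_info("msr %#x: host %#llx guest %#llx\n",
			arr[i].msr, arr[i].host, arr[i].guest);
}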
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -452,6 +452,8 @@ struct kvm_pmu {
 	u64 ds_area;
 	u64 pebs_enable;
 	u64 pebs_enable_mask;
+	u64 pebs_data_cfg;
+	u64 pebs_data_cfg_mask;
 	/*
 	 * The gate to release perf_events not marked in
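
As with pebs_enable_mask, the new pebs_data_cfg_mask stores the reserved bit positions, so a guest value is acceptable iff it intersects the mask nowhere. A hypothetical helper (not in the patch) making the convention explicit:

/* Hypothetical helper, not part of this patch: set bits in
 * pebs_data_cfg_mask mark reserved positions, so a write is valid
 * iff it touches none of them. intel_pmu_set_msr() open-codes this. */
static inline bool pebs_data_cfg_is_valid(struct kvm_pmu *pmu, u64 data)
{
	return !(data & pmu->pebs_data_cfg_mask);
}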
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -186,6 +186,9 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 	case MSR_IA32_DS_AREA:
 		ret = guest_cpuid_has(vcpu, X86_FEATURE_DS);
 		break;
+	case MSR_PEBS_DATA_CFG:
+		ret = vcpu->arch.perf_capabilities & PERF_CAP_PEBS_BASELINE;
+		break;
 	default:
 		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
 			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
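
MSR_PEBS_DATA_CFG exists only with adaptive PEBS ("PEBS baseline"), which a guest discovers via IA32_PERF_CAPABILITIES rather than CPUID; hence the check against the vCPU's emulated capabilities. The relevant definitions, as in asm/msr-index.h:

#define MSR_IA32_PERF_CAPABILITIES	0x00000345
#define PERF_CAP_PEBS_BASELINE		BIT_ULL(14)	/* adaptive PEBS supported */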
@@ -233,6 +236,9 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_DS_AREA:
 		msr_info->data = pmu->ds_area;
 		return 0;
+	case MSR_PEBS_DATA_CFG:
+		msr_info->data = pmu->pebs_data_cfg;
+		return 0;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
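
Reads are served from the cached pmu->pebs_data_cfg; the physical MSR only holds the guest's value while the vCPU is running. A guest-side round-trip sketch using the standard rdmsrl_safe()/wrmsrl_safe() helpers (MSR index 0x3f2 per the SDM; the probe function itself is illustrative):

#include <linux/errno.h>
#include <asm/msr.h>

#define MSR_PEBS_DATA_CFG	0x000003f2

/* Illustrative guest-side probe: the write lands in intel_pmu_set_msr()
 * and is cached; the read is answered by intel_pmu_get_msr() from that
 * cache, not from the hardware MSR. */
static int pebs_data_cfg_probe(void)
{
	u64 val = 0;

	if (wrmsrl_safe(MSR_PEBS_DATA_CFG, 0x1))	/* memory-info group */
		return -ENODEV;	/* #GP: MSR not exposed or value rejected */
	rdmsrl_safe(MSR_PEBS_DATA_CFG, &val);
	return val == 0x1 ? 0 : -EIO;
}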
@@ -305,6 +311,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return 1;
 		pmu->ds_area = data;
 		return 0;
+	case MSR_PEBS_DATA_CFG:
+		if (pmu->pebs_data_cfg == data)
+			return 0;
+		if (!(data & pmu->pebs_data_cfg_mask)) {
+			pmu->pebs_data_cfg = data;
+			return 0;
+		}
+		break;
 	default:
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
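
The accept/reject split follows the reserved-bit mask programmed by intel_pmu_refresh() below. With the adaptive PEBS group bits from arch/x86/include/asm/perf_event.h, a couple of worked values:

/* Group-enable bits, from arch/x86/include/asm/perf_event.h. */
#define PEBS_DATACFG_MEMINFO	BIT_ULL(0)
#define PEBS_DATACFG_GP		BIT_ULL(1)
#define PEBS_DATACFG_XMMS	BIT_ULL(2)
#define PEBS_DATACFG_LBRS	BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT	24

/* Accepted: all four record groups plus 32 LBR entries (31 << 24);
 * no bit intersects pebs_data_cfg_mask (~0xff00000full). */
u64 ok  = PEBS_DATACFG_MEMINFO | PEBS_DATACFG_GP | PEBS_DATACFG_XMMS |
	  PEBS_DATACFG_LBRS | (31ull << PEBS_DATACFG_LBR_SHIFT);

/* Rejected: bit 32 is reserved, so the second test fails, the switch
 * breaks out, set_msr returns 1, and the guest takes a #GP. */
u64 bad = BIT_ULL(32);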
@@ -355,6 +369,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	pmu->reserved_bits = 0xffffffff00200000ull;
 	pmu->fixed_ctr_ctrl_mask = ~0ull;
 	pmu->pebs_enable_mask = ~0ull;
+	pmu->pebs_data_cfg_mask = ~0ull;
 
 	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
 	if (!entry)
@@ -417,6 +432,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 			for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
 				pmu->fixed_ctr_ctrl_mask &=
 					~(1ULL << (INTEL_PMC_IDX_FIXED + i * 4));
+			pmu->pebs_data_cfg_mask = ~0xff00000full;
 		} else
 			pmu->pebs_enable_mask = ~((1ull << pmu->nr_arch_gp_counters) - 1);
 	} else {
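
How ~0xff00000full is derived, using the PEBS_DATACFG_* constants shown earlier: the architectural PEBS_DATA_CFG bits are the four group enables (3:0) and the LBR entry count (31:24), so:

/* Valid bits: four group enables plus the 8-bit LBR entry count. */
u64 valid = PEBS_DATACFG_MEMINFO | PEBS_DATACFG_GP |
	    PEBS_DATACFG_XMMS | PEBS_DATACFG_LBRS |	/* bits 3:0  */
	    (0xffull << PEBS_DATACFG_LBR_SHIFT);	/* bits 31:24 */

/* valid == 0xff00000f, hence the reserved mask ~0xff00000full. Without
 * PEBS baseline the mask stays ~0ull and every nonzero write faults. */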
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6531,8 +6531,11 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 	if (!msrs)
 		return;
 
-	if (nr_msrs > 2 && msrs[1].guest)
+	if (nr_msrs > 2 && msrs[1].guest) {
 		msrs[2].guest = pmu->ds_area;
+		if (nr_msrs > 3)
+			msrs[3].guest = pmu->pebs_data_cfg;
+	}
 
 	for (i = 0; i < nr_msrs; i++)
 		if (msrs[i].host == msrs[i].guest)
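
For reference, the tail of atomic_switch_perf_msrs() (unchanged by this hunk, as in current mainline) is what consumes the patched-in guest values; pairs that match are dropped from the VMX auto-switch lists, the rest are installed:

	/* Continuation of the loop above: identical host/guest pairs need
	 * no switching; differing pairs go onto the VM-entry/VM-exit MSR
	 * auto-load lists, so MSR_PEBS_DATA_CFG is swapped atomically. */
	for (i = 0; i < nr_msrs; i++)
		if (msrs[i].host == msrs[i].guest)
			clear_atomic_switch_msr(vmx, msrs[i].msr);
		else
			add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
					      msrs[i].host, false);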