@@ -164,9 +164,16 @@ static bool has_msr_perf_capabs;
static bool has_msr_pkrs;
static bool has_msr_hwcr;
-static uint32_t has_architectural_pmu_version;
-static uint32_t num_architectural_pmu_gp_counters;
-static uint32_t num_architectural_pmu_fixed_counters;
+/*
+ * For Intel processors, the meaning is the architectural PMU version
+ * number.
+ *
+ * For AMD processors: 1 corresponds to the legacy, pre-PerfMonV2
+ * implementation, and 2 corresponds to AMD PerfMonV2.
+ */
+static uint32_t pmu_version;
+static uint32_t num_pmu_gp_counters;
+static uint32_t num_pmu_fixed_counters;
static int has_xsave2;
static int has_xcrs;
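
The new comment gives pmu_version an AMD meaning (2 = PerfMonV2) in addition to the Intel architectural version number. As a rough sketch of where such a value could come from on an AMD host (the helper name and its fallback counter counts are illustrative assumptions, not part of this patch), PerfMonV2 and the core counter count are advertised in CPUID leaf 0x80000022:

    #include <cpuid.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Illustrative only: probe the host PMU flavour using the same
     * convention as pmu_version above (2 = AMD PerfMonV2, 1 = legacy).
     */
    static uint32_t amd_pmu_probe(uint32_t *num_gp_counters)
    {
        unsigned int eax, ebx, ecx, edx;

        /* CPUID 0x80000022: AMD extended performance monitoring. */
        if (__get_cpuid(0x80000022, &eax, &ebx, &ecx, &edx) && (eax & 1)) {
            *num_gp_counters = ebx & 0xf;   /* EBX[3:0]: core counters */
            return 2;                       /* PerfMonV2 */
        }

        /*
         * Legacy layout: 6 counters with PerfCtrExtCore
         * (CPUID 0x80000001 ECX bit 23), otherwise 4.
         */
        if (__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx) &&
            (ecx & (1u << 23))) {
            *num_gp_counters = 6;
        } else {
            *num_gp_counters = 4;
        }
        return 1;
    }

    int main(void)
    {
        uint32_t counters = 0;
        uint32_t version = amd_pmu_probe(&counters);

        printf("PMU version %u, %u general-purpose counters\n",
               version, counters);
        return 0;
    }
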
@@ -2068,24 +2075,24 @@ static void kvm_init_pmu_info(struct kvm_cpuid2 *cpuid)
return;
}
- has_architectural_pmu_version = c->eax & 0xff;
- if (has_architectural_pmu_version > 0) {
- num_architectural_pmu_gp_counters = (c->eax & 0xff00) >> 8;
+ pmu_version = c->eax & 0xff;
+ if (pmu_version > 0) {
+ num_pmu_gp_counters = (c->eax & 0xff00) >> 8;
/*
* Shouldn't be more than 32, since that's the number of bits
* available in EBX to tell us _which_ counters are available.
* Play it safe.
*/
- if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
- num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
+ if (num_pmu_gp_counters > MAX_GP_COUNTERS) {
+ num_pmu_gp_counters = MAX_GP_COUNTERS;
}
- if (has_architectural_pmu_version > 1) {
- num_architectural_pmu_fixed_counters = c->edx & 0x1f;
+ if (pmu_version > 1) {
+ num_pmu_fixed_counters = c->edx & 0x1f;
- if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
- num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
+ if (num_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
+ num_pmu_fixed_counters = MAX_FIXED_COUNTERS;
}
}
}
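
kvm_init_pmu_info() above derives pmu_version from CPUID leaf 0xA EAX[7:0], the general-purpose counter count from EAX[15:8], and, for version > 1, the fixed counter count from EDX[4:0], clamping both. A minimal standalone illustration of the same field extraction, reading the host's leaf 0xA instead of the guest CPUID table and using stand-in clamp limits (the real MAX_GP_COUNTERS/MAX_FIXED_COUNTERS are QEMU's own), could look like this:

    #include <cpuid.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in clamp limits; QEMU defines its own MAX_* constants. */
    #define MAX_GP_COUNTERS    16
    #define MAX_FIXED_COUNTERS 16

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;
        uint32_t version, gp = 0, fixed = 0;

        if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx)) {
            puts("CPUID leaf 0xA not available");
            return 1;
        }

        version = eax & 0xff;              /* EAX[7:0]:  PMU version    */
        if (version > 0) {
            gp = (eax & 0xff00) >> 8;      /* EAX[15:8]: GP counters    */
            if (gp > MAX_GP_COUNTERS) {
                gp = MAX_GP_COUNTERS;
            }
            if (version > 1) {
                fixed = edx & 0x1f;        /* EDX[4:0]:  fixed counters */
                if (fixed > MAX_FIXED_COUNTERS) {
                    fixed = MAX_FIXED_COUNTERS;
                }
            }
        }

        printf("version %u, %u GP counters, %u fixed counters\n",
               version, gp, fixed);
        return 0;
    }
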
@@ -4037,25 +4044,25 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr);
}
- if (has_architectural_pmu_version > 0) {
- if (has_architectural_pmu_version > 1) {
+ if (pmu_version > 0) {
+ if (pmu_version > 1) {
/* Stop the counter. */
kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
}
/* Set the counter values. */
- for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
+ for (i = 0; i < num_pmu_fixed_counters; i++) {
kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
env->msr_fixed_counters[i]);
}
- for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
+ for (i = 0; i < num_pmu_gp_counters; i++) {
kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
env->msr_gp_counters[i]);
kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
env->msr_gp_evtsel[i]);
}
- if (has_architectural_pmu_version > 1) {
+ if (pmu_version > 1) {
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
env->msr_global_status);
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
@@ -4515,17 +4522,17 @@ static int kvm_get_msrs(X86CPU *cpu)
if (env->features[FEAT_KVM] & CPUID_KVM_POLL_CONTROL) {
kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1);
}
- if (has_architectural_pmu_version > 0) {
- if (has_architectural_pmu_version > 1) {
+ if (pmu_version > 0) {
+ if (pmu_version > 1) {
kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
}
- for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
+ for (i = 0; i < num_pmu_fixed_counters; i++) {
kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
}
- for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
+ for (i = 0; i < num_pmu_gp_counters; i++) {
kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
}