@@ -23,16 +23,6 @@ enum pmu_type {
PMU_TYPE_EVNTSEL,
};
-enum index {
- INDEX_ZERO = 0,
- INDEX_ONE,
- INDEX_TWO,
- INDEX_THREE,
- INDEX_FOUR,
- INDEX_FIVE,
- INDEX_ERROR,
-};
-
/* duplicated from amd_perfmon_event_map, K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
@@ -55,11 +45,14 @@ static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];
}
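+/*
+ * Return the base MSR of the requested type: MSR_F15H_PERF_CTL or
+ * MSR_F15H_PERF_CTR when core_ctr says the guest has PERFCTR_CORE,
+ * otherwise the legacy MSR_K7_EVNTSEL0 or MSR_K7_PERFCTR0.
+ */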
-static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
+static u32 get_msr_base(bool core_ctr, enum pmu_type type)
{
- struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
-
- if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
+ if (core_ctr) {
if (type == PMU_TYPE_COUNTER)
return MSR_F15H_PERF_CTR;
else
@@ -72,77 +60,44 @@ static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
}
}
-static enum index msr_to_index(u32 msr)
-{
- switch (msr) {
- case MSR_F15H_PERF_CTL0:
- case MSR_F15H_PERF_CTR0:
- case MSR_K7_EVNTSEL0:
- case MSR_K7_PERFCTR0:
- return INDEX_ZERO;
- case MSR_F15H_PERF_CTL1:
- case MSR_F15H_PERF_CTR1:
- case MSR_K7_EVNTSEL1:
- case MSR_K7_PERFCTR1:
- return INDEX_ONE;
- case MSR_F15H_PERF_CTL2:
- case MSR_F15H_PERF_CTR2:
- case MSR_K7_EVNTSEL2:
- case MSR_K7_PERFCTR2:
- return INDEX_TWO;
- case MSR_F15H_PERF_CTL3:
- case MSR_F15H_PERF_CTR3:
- case MSR_K7_EVNTSEL3:
- case MSR_K7_PERFCTR3:
- return INDEX_THREE;
- case MSR_F15H_PERF_CTL4:
- case MSR_F15H_PERF_CTR4:
- return INDEX_FOUR;
- case MSR_F15H_PERF_CTL5:
- case MSR_F15H_PERF_CTR5:
- return INDEX_FIVE;
- default:
- return INDEX_ERROR;
- }
-}
-
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
enum pmu_type type)
{
struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
+ unsigned int ratio = 0;
+ unsigned int pmc_idx;
+ u32 base;
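+ /*
+ * Core MSRs interleave CTL and CTR registers (0xc0010200 is CTL0,
+ * 0xc0010201 is CTR0, 0xc0010202 is CTL1, ...), so MSRs of one
+ * type sit two apart. The legacy K7 MSRs put the four EVNTSELs
+ * (0xc0010000..3) ahead of the four PERFCTRs (0xc0010004..7), a
+ * stride of one.
+ */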
- switch (msr) {
- case MSR_F15H_PERF_CTL0:
- case MSR_F15H_PERF_CTL1:
- case MSR_F15H_PERF_CTL2:
- case MSR_F15H_PERF_CTL3:
- case MSR_F15H_PERF_CTL4:
- case MSR_F15H_PERF_CTL5:
- if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
- return NULL;
- fallthrough;
- case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
- if (type != PMU_TYPE_EVNTSEL)
- return NULL;
- break;
- case MSR_F15H_PERF_CTR0:
- case MSR_F15H_PERF_CTR1:
- case MSR_F15H_PERF_CTR2:
- case MSR_F15H_PERF_CTR3:
- case MSR_F15H_PERF_CTR4:
- case MSR_F15H_PERF_CTR5:
- if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
- return NULL;
- fallthrough;
- case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
- if (type != PMU_TYPE_COUNTER)
- return NULL;
- break;
- default:
- return NULL;
+ /* MSR_K7_* MSRs are still visible to a PERFCTR_CORE guest. */
+ if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE) &&
+ msr >= MSR_F15H_PERF_CTL0 && msr <= MSR_F15H_PERF_CTR5) {
+ base = get_msr_base(true, type);
+ ratio = 2;
+ } else if (msr >= MSR_K7_EVNTSEL0 && msr <= MSR_K7_PERFCTR3) {
+ base = get_msr_base(false, type);
+ ratio = 1;
+ /* Only four legacy counter/evntsel pairs exist. */
+ if (msr >= base + AMD64_NUM_COUNTERS)
+ return NULL;
}
- return &pmu->gp_counters[msr_to_index(msr)];
+ /*
+ * A type-mismatched MSR in the interleaved core range sits an odd
+ * distance from the base of the requested type; the check is a
+ * no-op for the legacy stride of one.
+ */
+ if (!ratio || msr < base || (msr - base) % ratio)
+ return NULL;
+
+ pmc_idx = (unsigned int)((msr - base) / ratio);
+ return amd_pmc_idx_to_pmc(pmu, pmc_idx);
}
static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)