@@ -262,7 +262,7 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
pmc->current_config = (u64)ctrl;
pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
- kvm_x86_ops.pmu_ops->find_fixed_event(idx),
+ kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc),
!(en_field & 0x2), /* exclude user */
!(en_field & 0x1), /* exclude kernel */
pmi, false, false);
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -25,7 +25,6 @@ struct kvm_event_hw_type_mapping {
struct kvm_pmu_ops {
unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc);
- unsigned (*find_fixed_event)(int idx);
bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -140,6 +140,10 @@ static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
int i;
+ /* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
+ if (WARN_ON(pmc_is_fixed(pmc)))
+ return PERF_COUNT_HW_MAX;
+
for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
if (amd_event_mapping[i].eventsel == event_select
&& amd_event_mapping[i].unit_mask == unit_mask)
@@ -151,12 +155,6 @@ static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
return amd_event_mapping[i].event_type;
}
-/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
-static unsigned amd_find_fixed_event(int idx)
-{
- return PERF_COUNT_HW_MAX;
-}
-
/* check if a PMC is enabled by comparing it against global_ctrl bits. Because
* AMD CPU doesn't have global_ctrl MSR, all PMCs are enabled (return TRUE).
*/
@@ -320,7 +318,6 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
struct kvm_pmu_ops amd_pmu_ops = {
.pmc_perf_hw_id = amd_pmc_perf_hw_id,
- .find_fixed_event = amd_find_fixed_event,
.pmc_is_enabled = amd_pmc_is_enabled,
.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -68,6 +68,23 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
reprogram_counter(pmu, bit);
}
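+/*
+ * Map a fixed counter index to its architectural perf event id;
+ * array_index_nospec() hardens the table lookup against speculative
+ * out-of-bounds access, and an out-of-range index yields PERF_COUNT_HW_MAX.
+ */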
+static inline unsigned int intel_find_fixed_event(int idx)
+{
+ u32 event;
+ size_t size = ARRAY_SIZE(fixed_pmc_events);
+
+ if (idx >= size)
+ return PERF_COUNT_HW_MAX;
+
+ event = fixed_pmc_events[array_index_nospec(idx, size)];
+ return intel_arch_events[event].event_type;
+}
+
static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -75,6 +92,10 @@ static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
int i;
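+ /* fixed counters select their event by pmc->idx, not by eventsel */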
+ if (pmc_is_fixed(pmc))
+ return intel_find_fixed_event(pmc->idx - INTEL_PMC_IDX_FIXED);
+
for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
if (intel_arch_events[i].eventsel == event_select
&& intel_arch_events[i].unit_mask == unit_mask
@@ -87,18 +108,6 @@ static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
return intel_arch_events[i].event_type;
}
-static unsigned intel_find_fixed_event(int idx)
-{
- u32 event;
- size_t size = ARRAY_SIZE(fixed_pmc_events);
-
- if (idx >= size)
- return PERF_COUNT_HW_MAX;
-
- event = fixed_pmc_events[array_index_nospec(idx, size)];
- return intel_arch_events[event].event_type;
-}
-
/* check if a PMC is enabled by comparing it with globl_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
@@ -721,7 +730,6 @@ static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
struct kvm_pmu_ops intel_pmu_ops = {
.pmc_perf_hw_id = intel_pmc_perf_hw_id,
- .find_fixed_event = intel_find_fixed_event,
.pmc_is_enabled = intel_pmc_is_enabled,
.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,