@@ -19,6 +19,9 @@
#include "lapic.h"
#include "pmu.h"
+struct kvm_event_hw_type_mapping kernel_arch_events[PERF_COUNT_HW_MAX];
+EXPORT_SYMBOL_GPL(kernel_arch_events);
+
/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
@@ -217,7 +220,9 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
- if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
+ /* Fall back to PERF_TYPE_RAW mode if event_select and unit_mask are both 0. */
+ if ((event_select | unit_mask) &&
+ !(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
ARCH_PERFMON_EVENTSEL_INV |
ARCH_PERFMON_EVENTSEL_CMASK |
HSW_IN_TX |
@@ -499,6 +504,23 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
kvm_pmu_reset(vcpu);
}
+/* Initialize the common hardware event mapping, indexed by enum perf_hw_id. */
+void kvm_pmu_hw_events_mapping_setup(void)
+{
+ u64 config;
+ int i;
+
+ for (i = 0; i < PERF_COUNT_HW_MAX; i++) {
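+ /*
+ * Keep only the low 16 bits of the host's raw config: event_select
+ * lives in bits 7:0 and unit_mask in bits 15:8 of the encoding.
+ */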
+ config = perf_get_hw_event_config(i) & 0xFFFFULL;
+
+ kernel_arch_events[i] = (struct kvm_event_hw_type_mapping){
+ .eventsel = config & ARCH_PERFMON_EVENTSEL_EVENT,
+ .unit_mask = (config & ARCH_PERFMON_EVENTSEL_UMASK) >> 8,
+ .event_type = i,
+ };
+ }
+}
+
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
struct kvm_pmu_event_filter tmp, *filter;
@@ -160,8 +160,10 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
+void kvm_pmu_hw_events_mapping_setup(void);
bool is_vmware_backdoor_pmc(u32 pmc_idx);
extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
+extern struct kvm_event_hw_type_mapping kernel_arch_events[];
#endif /* __KVM_X86_PMU_H */
@@ -32,18 +32,6 @@ enum index {
INDEX_ERROR,
};
-/* duplicated from amd_perfmon_event_map, K7 and above should work. */
-static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
- [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
- [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
- [2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
- [3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
- [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
- [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
- [6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
- [7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
-};
-
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
@@ -140,15 +128,15 @@ static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
{
int i;
- for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
- if (amd_event_mapping[i].eventsel == event_select
- && amd_event_mapping[i].unit_mask == unit_mask)
+ for (i = 0; i < PERF_COUNT_HW_MAX; i++)
+ if (kernel_arch_events[i].eventsel == event_select &&
+ kernel_arch_events[i].unit_mask == unit_mask)
break;
- if (i == ARRAY_SIZE(amd_event_mapping))
+ if (i == PERF_COUNT_HW_MAX)
return PERF_COUNT_HW_MAX;
- return amd_event_mapping[i].event_type;
+ return kernel_arch_events[i].event_type;
}
/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
@@ -20,20 +20,14 @@
#define MSR_PMC_FULL_WIDTH_BIT (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
-static struct kvm_event_hw_type_mapping intel_arch_events[] = {
- [0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
- [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
- [2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
- [3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
- [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
- [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
- [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
- /* The above index must match CPUID 0x0A.EBX bit vector */
- [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
-};
-
-/* mapping between fixed pmc index and intel_arch_events array */
-static int fixed_pmc_events[] = {1, 0, 7};
+/*
+ * Mapping between fixed pmc index and kernel_arch_events array, i.e. the
+ * enum perf_hw_id value each fixed counter is hard-wired to:
+ *
+ * Fixed counter 0: PERF_COUNT_HW_INSTRUCTIONS (1)
+ * Fixed counter 1: PERF_COUNT_HW_CPU_CYCLES (0)
+ * Fixed counter 2: PERF_COUNT_HW_REF_CPU_CYCLES (9)
+ */
+static int fixed_pmc_events[] = {1, 0, 9};
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
@@ -90,9 +84,9 @@ static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
{
int i;
- for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) {
- if (intel_arch_events[i].eventsel != event_select ||
- intel_arch_events[i].unit_mask != unit_mask)
+ for (i = 0; i < PERF_COUNT_HW_MAX; i++) {
+ if (kernel_arch_events[i].eventsel != event_select ||
+ kernel_arch_events[i].unit_mask != unit_mask)
continue;
if (is_intel_cpuid_event(event_select, unit_mask) &&
@@ -102,10 +96,10 @@ static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
break;
}
- if (i == ARRAY_SIZE(intel_arch_events))
+ if (i == PERF_COUNT_HW_MAX)
return PERF_COUNT_HW_MAX;
- return intel_arch_events[i].event_type;
+ return kernel_arch_events[i].event_type;
}
static unsigned int intel_find_fixed_event(struct kvm_pmu *pmu, int idx)
@@ -120,9 +114,9 @@ static unsigned int intel_find_fixed_event(struct kvm_pmu *pmu, int idx)
event = fixed_pmc_events[array_index_nospec(idx, size)];
- event_select = intel_arch_events[event].eventsel;
- unit_mask = intel_arch_events[event].unit_mask;
- event_type = intel_arch_events[event].event_type;
+ event_select = kernel_arch_events[event].eventsel;
+ unit_mask = kernel_arch_events[event].unit_mask;
+ event_type = kernel_arch_events[event].event_type;
if (is_intel_cpuid_event(event_select, unit_mask) &&
!test_bit(event_type, pmu->avail_cpuid_events))
@@ -493,6 +487,33 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
}
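+/*
+ * Translate a bit index from the CPUID 0x0A.EBX architectural-events vector
+ * into the enum perf_hw_id value used to index kernel_arch_events[]:
+ *
+ * bit 0 (core cycles)          -> PERF_COUNT_HW_CPU_CYCLES
+ * bit 1 (instructions retired) -> PERF_COUNT_HW_INSTRUCTIONS
+ * bit 2 (reference cycles)     -> PERF_COUNT_HW_BUS_CYCLES
+ * bit 3 (LLC references)       -> PERF_COUNT_HW_CACHE_REFERENCES
+ * bit 4 (LLC misses)           -> PERF_COUNT_HW_CACHE_MISSES
+ * bit 5 (branches retired)     -> PERF_COUNT_HW_BRANCH_INSTRUCTIONS
+ * bit 6 (branch mispredicts)   -> PERF_COUNT_HW_BRANCH_MISSES
+ *
+ * Any other bit has no generic counterpart and yields PERF_COUNT_HW_MAX.
+ */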
+static inline int get_perf_hw_id_from_cpuid_idx(int bit)
+{
+ switch (bit) {
+ case 0:
+ case 1:
+ return bit;
+ case 2:
+ return PERF_COUNT_HW_BUS_CYCLES;
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ return bit - 1;
+ }
+
+ return PERF_COUNT_HW_MAX;
+}
+
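+/*
+ * pmu->avail_cpuid_events is indexed by enum perf_hw_id, whereas the CPUID
+ * 0x0A.EBX vector is indexed by architectural event number, so remap the
+ * availability bits accordingly before recording them.
+ */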
+static inline void setup_available_kernel_arch_events(struct kvm_pmu *pmu,
+ u64 avail_cpuid_events, unsigned int mask_length)
+{
+ unsigned long events = avail_cpuid_events;
+ unsigned int hw_id;
+ int bit;
+
+ for_each_set_bit(bit, &events, mask_length) {
+ hw_id = get_perf_hw_id_from_cpuid_idx(bit);
+
+ /* Skip CPUID bits with no generic perf_hw_id counterpart. */
+ if (hw_id < PERF_COUNT_HW_MAX)
+ __set_bit(hw_id, pmu->avail_cpuid_events);
+ }
+}
+
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -529,9 +550,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
avail_cpuid_events = ~entry->ebx & ((1ull << eax.split.mask_length) - 1);
- bitmap_copy(pmu->avail_cpuid_events,
- (unsigned long *)&avail_cpuid_events,
- eax.split.mask_length);
+ setup_available_kernel_arch_events(pmu, avail_cpuid_events,
+ eax.split.mask_length);
if (pmu->version == 1) {
pmu->nr_arch_fixed_counters = 0;
@@ -11317,6 +11317,7 @@ int kvm_arch_hardware_setup(void *opaque)
memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
kvm_ops_static_call_update();
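+ /* Populate kernel_arch_events[] from the host perf mappings. */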
+ kvm_pmu_hw_events_mapping_setup();
if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
supported_xss = 0;