@@ -577,6 +577,38 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
 }
 EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);
 
+static inline u64 get_event_filter_mask(void)
+{
+	u64 event_select_mask =
+		static_call(kvm_x86_pmu_get_eventsel_event_mask)();
+
+	return event_select_mask | ARCH_PERFMON_EVENTSEL_UMASK;
+}
+
+static inline bool is_event_valid(u64 event, u64 mask)
+{
+	return !(event & ~mask);
+}
+
+static void remove_invalid_raw_events(struct kvm_pmu_event_filter *filter)
+{
+	u64 raw_mask;
+	int i, j;
+
+	if (filter->flags)
+		return;
+
+	raw_mask = get_event_filter_mask();
+	for (i = 0, j = 0; i < filter->nevents; i++) {
+		u64 raw_event = filter->events[i];
+
+		if (is_event_valid(raw_event, raw_mask))
+			filter->events[j++] = raw_event;
+	}
+
+	filter->nevents = j;
+}
+
 int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
 {
 	struct kvm_pmu_event_filter tmp, *filter;
@@ -608,6 +640,8 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
 	/* Ensure nevents can't be changed between the user copies. */
 	*filter = tmp;
+	remove_invalid_raw_events(filter);
+
 	/*
 	 * Sort the in-kernel list so that we can search it with bsearch.
 	 */
If a raw event is invalid, i.e. it has bits set outside the event
select + unit mask, the event will never match the search, so it's
pointless to have it in the list.  Opt for a shorter list by removing
invalid raw events.

Signed-off-by: Aaron Lewis <aaronlewis@google.com>
---
 arch/x86/kvm/pmu.c | 34 ++++++++++++++++++++++++++++++++++
 1 file changed, 34 insertions(+)
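For illustration, below is a standalone sketch of the same in-place
compaction.  The 0xffff mask is an assumption matching Intel (event
select in bits 7:0, unit mask in bits 15:8); the real mask is
vendor-specific and comes from kvm_x86_pmu_get_eventsel_event_mask(),
e.g. AMD additionally accepts the extended event select bits.

  #include <stdint.h>
  #include <stdio.h>

  /* Stand-in for get_event_filter_mask(); Intel layout assumed. */
  #define RAW_EVENT_MASK 0xffffull

  int main(void)
  {
  	uint64_t events[] = { 0x01c2, 0x1000001c2ull, 0x003c };
  	int i, j, nevents = 3;

  	/* Same compaction as remove_invalid_raw_events(). */
  	for (i = 0, j = 0; i < nevents; i++) {
  		if (!(events[i] & ~RAW_EVENT_MASK))
  			events[j++] = events[i];
  	}
  	nevents = j;

  	/* Prints 0x1c2 and 0x3c; the event with stray bit 32 set is gone. */
  	for (i = 0; i < nevents; i++)
  		printf("0x%llx\n", (unsigned long long)events[i]);

  	return 0;
  }

Dropping such events up front keeps the sorted list, and every
bsearch over it, as small as possible.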