[v4,3/7] kvm: x86/pmu: prepare the pmu event filter for masked events

Message ID 20220831162124.947028-4-aaronlewis@google.com (mailing list archive)
State New, archived
Series: Introduce and test masked events

Commit Message

Aaron Lewis Aug. 31, 2022, 4:21 p.m. UTC
Create an internal representation for filter events to abstract the
events userspace uses from the events the kernel uses.  That will allow
the kernel to use a common event and a common code path between the
different types of filter events used in userspace once masked events
are introduced.

No functional changes intended.

Signed-off-by: Aaron Lewis <aaronlewis@google.com>
---
 arch/x86/kvm/pmu.c | 118 ++++++++++++++++++++++++++++++++-------------
 arch/x86/kvm/pmu.h |  16 ++++++
 2 files changed, 100 insertions(+), 34 deletions(-)
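
For context: the new internal key packs a 12-bit event select and an 8-bit
unit mask into one u64, so raw events from userspace and the masked events
introduced later in the series share a single lookup path. On AMD the event
select spans bits 0-7 plus bits 32-35, which get_event_select() folds down
into bits 8-11 of the key. A minimal standalone sketch of the encoding (not
part of the patch; mask constants copied from the x86 perf headers, with a
hard-coded AMD mask standing in for the
kvm_x86_pmu_get_eventsel_event_mask() static call):

#include <stdint.h>
#include <stdio.h>

/* Architectural PERFMON eventsel layout (from the x86 perf headers). */
#define ARCH_PERFMON_EVENTSEL_EVENT	0x000000FFULL	/* bits 0-7  */
#define ARCH_PERFMON_EVENTSEL_UMASK	0x0000FF00ULL	/* bits 8-15 */
/* AMD extends the event select with bits 32-35. */
#define AMD64_EVENTSEL_EVENT	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))

/* Pack a 12-bit event select and an 8-bit unit mask into one key. */
#define KVM_PMU_ENCODE_FILTER_ENTRY(event_select, unit_mask) \
	(((event_select) & 0xFFFULL) | \
	(((unit_mask) & 0xFFULL) << 12))

static uint16_t get_event_select(uint64_t eventsel)
{
	/* Stand-in for the per-vendor static call. */
	uint64_t e = eventsel & AMD64_EVENTSEL_EVENT;

	/* Fold the extended bits 32-35 down next to bits 0-7. */
	return (e & ARCH_PERFMON_EVENTSEL_EVENT) | ((e >> 24) & 0xF00ULL);
}

static uint8_t get_unit_mask(uint64_t eventsel)
{
	return (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
}

int main(void)
{
	/* Event 0x1C7, unit mask 0x03: the high nibble sits in bits 32-35. */
	uint64_t eventsel = (0x1ULL << 32) | 0x03C7ULL;
	uint64_t key = KVM_PMU_ENCODE_FILTER_ENTRY(get_event_select(eventsel),
						   get_unit_mask(eventsel));

	printf("key = 0x%llx\n", (unsigned long long)key);
	return 0;
}

Running it prints key = 0x31c7, i.e. KVM_PMU_ENCODE_FILTER_ENTRY(0x1c7, 0x03).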

Comments

Jim Mattson Sept. 14, 2022, 6:25 p.m. UTC | #1
On Wed, Aug 31, 2022 at 9:21 AM Aaron Lewis <aaronlewis@google.com> wrote:
> [...]
> +static bool filter_contains_match(struct kvm_pmu_event_filter *filter,
> +                                 u64 eventsel)
> +{
> +       u16 event_select = get_event_select(eventsel);
> +       u8 unit_mask = get_unit_mask(eventsel);
> +       u64 key;
> +
> +       key = KVM_PMU_ENCODE_FILTER_ENTRY(event_select, unit_mask);
> +       if (find_filter_entry(filter, key))
> +               return true;
> +       return false;

Perhaps just:
        return find_filter_entry(filter, key);

Reviewed-by: Jim Mattson <jmattson@google.com>
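
The suggestion is well-defined C: find_filter_entry() returns a u64 *, and a
non-NULL pointer converts to true when returned from a bool function. A
sketch of the helper with the suggestion folded in (not part of the posted
patch):

static bool filter_contains_match(struct kvm_pmu_event_filter *filter,
				  u64 eventsel)
{
	u16 event_select = get_event_select(eventsel);
	u8 unit_mask = get_unit_mask(eventsel);
	u64 key = KVM_PMU_ENCODE_FILTER_ENTRY(event_select, unit_mask);

	/* A non-NULL bsearch() hit converts to true. */
	return find_filter_entry(filter, key);
}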

Patch

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index e7d94e6b7f28..50a36cc5bfd0 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -239,6 +239,19 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
 	return true;
 }
 
+static inline u16 get_event_select(u64 eventsel)
+{
+	u64 e = eventsel &
+		static_call(kvm_x86_pmu_get_eventsel_event_mask)();
+
+	return (e & ARCH_PERFMON_EVENTSEL_EVENT) | ((e >> 24) & 0xF00ULL);
+}
+
+static inline u8 get_unit_mask(u64 eventsel)
+{
+	return (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+}
+
 static int cmp_u64(const void *pa, const void *pb)
 {
 	u64 a = *(u64 *)pa;
@@ -247,53 +260,63 @@ static int cmp_u64(const void *pa, const void *pb)
 	return (a > b) - (a < b);
 }
 
-static inline u64 get_event_select(u64 eventsel)
+static u64 *find_filter_entry(struct kvm_pmu_event_filter *filter, u64 key)
+{
+	return bsearch(&key, filter->events, filter->nevents,
+			  sizeof(filter->events[0]), cmp_u64);
+}
+
+static bool filter_contains_match(struct kvm_pmu_event_filter *filter,
+				  u64 eventsel)
+{
+	u16 event_select = get_event_select(eventsel);
+	u8 unit_mask = get_unit_mask(eventsel);
+	u64 key;
+
+	key = KVM_PMU_ENCODE_FILTER_ENTRY(event_select, unit_mask);
+	if (find_filter_entry(filter, key))
+		return true;
+	return false;
+}
+
+static bool is_gp_event_allowed(struct kvm_pmu_event_filter *filter, u64 eventsel)
 {
-	return eventsel & static_call(kvm_x86_pmu_get_eventsel_event_mask)();
+	if (filter_contains_match(filter, eventsel))
+		return filter->action == KVM_PMU_EVENT_ALLOW;
+
+	return filter->action == KVM_PMU_EVENT_DENY;
 }
 
-static inline u64 get_raw_event(u64 eventsel)
+static bool is_fixed_event_allowed(struct kvm_pmu_event_filter *filter, int idx)
 {
-	u64 event_select = get_event_select(eventsel);
-	u64 unit_mask = eventsel & ARCH_PERFMON_EVENTSEL_UMASK;
+	int fixed_idx = idx - INTEL_PMC_IDX_FIXED;
 
-	return event_select | unit_mask;
+	if (filter->action == KVM_PMU_EVENT_DENY &&
+	    test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
+		return false;
+	if (filter->action == KVM_PMU_EVENT_ALLOW &&
+	    !test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
+		return false;
+
+	return true;
 }
 
 static bool check_pmu_event_filter(struct kvm_pmc *pmc)
 {
 	struct kvm_pmu_event_filter *filter;
 	struct kvm *kvm = pmc->vcpu->kvm;
-	bool allow_event = true;
-	__u64 key;
-	int idx;
 
 	if (!static_call(kvm_x86_pmu_hw_event_available)(pmc))
 		return false;
 
 	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
 	if (!filter)
-		goto out;
+		return true;
 
-	if (pmc_is_gp(pmc)) {
-		key = get_raw_event(pmc->eventsel);
-		if (bsearch(&key, filter->events, filter->nevents,
-			    sizeof(__u64), cmp_u64))
-			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
-		else
-			allow_event = filter->action == KVM_PMU_EVENT_DENY;
-	} else {
-		idx = pmc->idx - INTEL_PMC_IDX_FIXED;
-		if (filter->action == KVM_PMU_EVENT_DENY &&
-		    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
-			allow_event = false;
-		if (filter->action == KVM_PMU_EVENT_ALLOW &&
-		    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
-			allow_event = false;
-	}
+	if (pmc_is_gp(pmc))
+		return is_gp_event_allowed(filter, pmc->eventsel);
 
-out:
-	return allow_event;
+	return is_fixed_event_allowed(filter, pmc->idx);
 }
 
 void reprogram_counter(struct kvm_pmc *pmc)
@@ -609,6 +632,38 @@ static void remove_invalid_raw_events(struct kvm_pmu_event_filter *filter)
 	filter->nevents = j;
 }
 
+static inline u64 encode_filter_entry(u64 event)
+{
+	u16 event_select = get_event_select(event);
+	u8 unit_mask = get_unit_mask(event);
+
+	return KVM_PMU_ENCODE_FILTER_ENTRY(event_select, unit_mask);
+}
+
+static void convert_to_filter_events(struct kvm_pmu_event_filter *filter)
+{
+	int i;
+
+	for (i = 0; i < filter->nevents; i++) {
+		u64 e = filter->events[i];
+
+		filter->events[i] = encode_filter_entry(e);
+	}
+}
+
+static void prepare_filter_events(struct kvm_pmu_event_filter *filter)
+{
+	remove_invalid_raw_events(filter);
+
+	convert_to_filter_events(filter);
+
+	/*
+	 * Sort the in-kernel list so that we can search it with bsearch.
+	 */
+	sort(&filter->events, filter->nevents, sizeof(filter->events[0]),
+	     cmp_u64, NULL);
+}
+
 int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
 {
 	struct kvm_pmu_event_filter tmp, *filter;
@@ -640,12 +695,7 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
 	/* Ensure nevents can't be changed between the user copies. */
 	*filter = tmp;
 
-	remove_invalid_raw_events(filter);
-
-	/*
-	 * Sort the in-kernel list so that we can search it with bsearch.
-	 */
-	sort(&filter->events, filter->nevents, sizeof(__u64), cmp_u64, NULL);
+	prepare_filter_events(filter);
 
 	mutex_lock(&kvm->lock);
 	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 4e22f9f55400..df4f81e5c685 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -205,4 +205,20 @@ bool is_vmware_backdoor_pmc(u32 pmc_idx);
 
 extern struct kvm_pmu_ops intel_pmu_ops;
 extern struct kvm_pmu_ops amd_pmu_ops;
+
+struct kvm_pmu_filter_entry {
+	union {
+		u64 raw;
+		struct {
+			u64 event_select:12;
+			u64 unit_mask:8;
+			u64 rsvd:44;
+		};
+	};
+};
+
+#define KVM_PMU_ENCODE_FILTER_ENTRY(event_select, unit_mask) \
+	(((event_select) & 0xFFFULL) | \
+	(((unit_mask) & 0xFFULL) << 12))
+
 #endif /* __KVM_X86_PMU_H */
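
To see the pmu.h layout in action: a standalone sketch (not part of the
patch) that decodes a packed key through a mirror of the union above, using
stdint types so it builds outside the kernel. C11 anonymous struct members
are assumed, as in the kernel definition:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the kernel's kvm_pmu_filter_entry for a standalone build. */
struct kvm_pmu_filter_entry {
	union {
		uint64_t raw;
		struct {
			uint64_t event_select:12;
			uint64_t unit_mask:8;
			uint64_t rsvd:44;
		};
	};
};

int main(void)
{
	/* 0x31c7: event select 0x1c7 in bits 0-11, unit mask 0x03 in bits 12-19. */
	struct kvm_pmu_filter_entry e = { .raw = 0x31c7 };

	printf("event_select=0x%x unit_mask=0x%x\n",
	       (unsigned)e.event_select, (unsigned)e.unit_mask);
	return 0;
}

For the key from the earlier example, 0x31c7, this prints
event_select=0x1c7 unit_mask=0x3.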