
[v5,2/7] kvm: x86/pmu: Remove invalid raw events from the pmu event filter

Message ID: 20220920174603.302510-3-aaronlewis@google.com
State: New, archived
Series: Introduce and test masked events

Commit Message

Aaron Lewis Sept. 20, 2022, 5:45 p.m. UTC
If a raw event is invalid, i.e. bits set outside the event select +
unit mask, the event will never match the search, so it's pointless
to have it in the list.  Opt for a shorter list by removing invalid
raw events.

Signed-off-by: Aaron Lewis <aaronlewis@google.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
---
 arch/x86/kvm/pmu.c | 34 ++++++++++++++++++++++++++++++++++
 1 file changed, 34 insertions(+)
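To make the "impossible" case concrete: on Intel the event select lives in bits 7:0 and the unit mask in bits 15:8 of the eventsel MSR (AMD additionally uses bits 35:32 for the event select, which is why the patch fetches a vendor-specific mask), so a filter entry with any other bit set can never compare equal to a value KVM extracts from a guest's eventsel. A minimal standalone sketch of the check, assuming the Intel layout:

	#include <stdint.h>
	#include <stdio.h>

	/* Intel layout: event select in bits 7:0, unit mask in bits 15:8. */
	#define EVENTSEL_EVENT	0x00000000000000ffULL
	#define EVENTSEL_UMASK	0x000000000000ff00ULL

	int main(void)
	{
		uint64_t mask = EVENTSEL_EVENT | EVENTSEL_UMASK;
		uint64_t ok  = 0x10c0;			/* umask 0x10, event 0xc0 */
		uint64_t bad = (1ULL << 20) | 0xc0;	/* stray bit 20 set */

		/* Matching only ever compares bits inside the mask, so an
		 * entry with bits outside it can never hit. */
		printf("0x%jx impossible? %d\n", (uintmax_t)ok, !!(ok & ~mask));
		printf("0x%jx impossible? %d\n", (uintmax_t)bad, !!(bad & ~mask));
		return 0;
	}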

Comments

Sean Christopherson Oct. 7, 2022, 11:48 p.m. UTC | #1
On Tue, Sep 20, 2022, Aaron Lewis wrote:
> If a raw event is invalid, i.e. bits set outside the event select +
> unit mask, the event will never match the search, so it's pointless
> to have it in the list.  Opt for a shorter list by removing invalid
> raw events.

Please use "impossible" instead of "invalid".  While I agree they are invalid,
because this is pre-existing ABI, KVM can't treat them as invalid, i.e. can't
reject them.  My initial reaction to seeing remove_invalid_raw_events() was not
exactly PG-13 :-)

Can you also call out that because this is established uABI, KVM can't outright
reject garbage?

> Signed-off-by: Aaron Lewis <aaronlewis@google.com>
> Reviewed-by: Jim Mattson <jmattson@google.com>
> ---
>  arch/x86/kvm/pmu.c | 34 ++++++++++++++++++++++++++++++++++
>  1 file changed, 34 insertions(+)
> 
> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
> index 98f383789579..e7d94e6b7f28 100644
> --- a/arch/x86/kvm/pmu.c
> +++ b/arch/x86/kvm/pmu.c
> @@ -577,6 +577,38 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
>  }
>  EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);
>  
> +static inline u64 get_event_filter_mask(void)
> +{
> +	u64 event_select_mask =
> +		static_call(kvm_x86_pmu_get_eventsel_event_mask)();
> +
> +	return event_select_mask | ARCH_PERFMON_EVENTSEL_UMASK;
> +}
> +
> +static inline bool is_event_valid(u64 event, u64 mask)
> +{
> +	return !(event & ~mask);
> +}

As a general theme, don't go crazy with short helpers that only ever have a single
user.  Having to jump around to find the definition interrupts the reader and
obfuscates simple things.  If the code is particularly complex/weird, then adding
a helper to abstract the concept is useful, but that's not the case here.

> +
> +static void remove_invalid_raw_events(struct kvm_pmu_event_filter *filter)

s/invalid/impossible, and drop the "raw".  Making up terminology when it's not
strictly necessary is always confusing.

> +{
> +	u64 raw_mask;
> +	int i, j;
> +
> +	if (filter->flags)

If I'm reading all this magic correctly, this is dead code because
kvm_vm_ioctl_set_pmu_event_filter() checks flags:

	if (tmp.flags != 0)
		return -EINVAL;

and does the somewhat horrific thing of:

	/* Ensure nevents can't be changed between the user copies. */
	*filter = tmp;
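For readers unfamiliar with the idiom: the function copies the fixed-size header first to validate it, copies the full buffer (header plus the flexible events[] array) second, then restores the validated header by struct assignment, which copies only the header fields because the flexible array isn't part of sizeof(*filter). A condensed sketch of that flow, with the validation and error paths abridged:

	struct kvm_pmu_event_filter tmp, *filter;

	if (copy_from_user(&tmp, argp, sizeof(tmp)))
		return -EFAULT;

	/* ... validate tmp.action, tmp.flags and tmp.nevents ... */

	filter = kmalloc(struct_size(filter, events, tmp.nevents),
			 GFP_KERNEL_ACCOUNT);
	if (!filter)
		return -ENOMEM;

	/* The second copy pulls in the events[] array as well. */
	if (copy_from_user(filter, argp,
			   struct_size(filter, events, tmp.nevents)))
		goto cleanup;

	/*
	 * Clobber the header with the already-validated copy; a hostile
	 * userspace changing nevents or flags between the two copies
	 * gains nothing.
	 */
	*filter = tmp;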

> +		return;
> +
> +	raw_mask = get_event_filter_mask();
> +	for (i = 0, j = 0; i < filter->nevents; i++) {
> +		u64 raw_event = filter->events[i];

Meh, using a local variable is unnecessary.

> +
> +		if (is_event_valid(raw_event, raw_mask))
> +			filter->events[j++] = raw_event;

With the helpers gone, this is simply: 

		if (filter->events[i] & ~kvm_pmu_ops.EVENTSEL_MASK)
			continue;

		filter->events[j++] = filter->events[i];

> +	}
> +
> +	filter->nevents = j;
> +}
> +
>  int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
>  {
>  	struct kvm_pmu_event_filter tmp, *filter;
> @@ -608,6 +640,8 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
>  	/* Ensure nevents can't be changed between the user copies. */
>  	*filter = tmp;

Eww.  This is so gross.  But I guess it's not really that much worse than copying
only the new bits.

Can you opportunistically rewrite the comment to clarify that it guards against
_all_ TOCTOU attacks on the verified data?

	/* Restore the verified state to guard against TOCTOU attacks. */
	*filter = tmp;

As a full diff:

---
 arch/x86/kvm/pmu.c | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index d0e2c7eda65b..384cefbe20dd 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -574,6 +574,20 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
 }
 EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);
 
+static void remove_impossible_events(struct kvm_pmu_event_filter *filter)
+{
+	int i, j;
+
+	for (i = 0, j = 0; i < filter->nevents; i++) {
+		if (filter->events[i] & ~kvm_pmu_ops.EVENTSEL_MASK)
+			continue;
+
+		filter->events[j++] = filter->events[i];
+	}
+
+	filter->nevents = j;
+}
+
 int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
 {
 	struct kvm_pmu_event_filter tmp, *filter;
@@ -602,9 +616,11 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
 	if (copy_from_user(filter, argp, size))
 		goto cleanup;
 
-	/* Ensure nevents can't be changed between the user copies. */
+	/* Restore the verified state to guard against TOCTOU attacks. */
 	*filter = tmp;
 
+	remove_impossible_events(filter);
+
 	/*
 	 * Sort the in-kernel list so that we can search it with bsearch.
 	 */

base-commit: a5c25b1eacf50b851badf1c5cbb618094cbdf40f
--
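
To ground the "established uABI" point: userspace installs the filter with the KVM_SET_PMU_EVENT_FILTER ioctl on a VM fd, and impossible entries have always been accepted, so with this patch KVM silently drops them instead of failing the ioctl. A rough userspace sketch (assumes an already-open VM fd; error handling omitted):

	#include <linux/kvm.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>

	static int set_filter(int vm_fd)
	{
		struct kvm_pmu_event_filter *f;
		size_t sz = sizeof(*f) + 2 * sizeof(f->events[0]);
		int r;

		f = calloc(1, sz);	/* flags and pad must be zero */
		if (!f)
			return -1;
		f->action = KVM_PMU_EVENT_ALLOW;
		f->nevents = 2;
		f->events[0] = 0x10c0;			/* umask 0x10, event 0xc0 */
		f->events[1] = (1ULL << 20) | 0xc0;	/* impossible: stray bit 20 */

		/*
		 * Established uABI: this call has always accepted impossible
		 * entries, so the patch drops them rather than returning an
		 * error here.
		 */
		r = ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, f);
		free(f);
		return r;
	}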

Patch

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 98f383789579..e7d94e6b7f28 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -577,6 +577,38 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
 }
 EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);
 
+static inline u64 get_event_filter_mask(void)
+{
+	u64 event_select_mask =
+		static_call(kvm_x86_pmu_get_eventsel_event_mask)();
+
+	return event_select_mask | ARCH_PERFMON_EVENTSEL_UMASK;
+}
+
+static inline bool is_event_valid(u64 event, u64 mask)
+{
+	return !(event & ~mask);
+}
+
+static void remove_invalid_raw_events(struct kvm_pmu_event_filter *filter)
+{
+	u64 raw_mask;
+	int i, j;
+
+	if (filter->flags)
+		return;
+
+	raw_mask = get_event_filter_mask();
+	for (i = 0, j = 0; i < filter->nevents; i++) {
+		u64 raw_event = filter->events[i];
+
+		if (is_event_valid(raw_event, raw_mask))
+			filter->events[j++] = raw_event;
+	}
+
+	filter->nevents = j;
+}
+
 int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
 {
 	struct kvm_pmu_event_filter tmp, *filter;
@@ -608,6 +640,8 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
 	/* Ensure nevents can't be changed between the user copies. */
 	*filter = tmp;
 
+	remove_invalid_raw_events(filter);
+
 	/*
 	 * Sort the in-kernel list so that we can search it with bsearch.
 	 */