
[v4,3/6] KVM: selftests: Introduce __kvm_pmu_event_filter to improve event filter settings

Message ID 20230717062343.3743-4-cloudliang@tencent.com (mailing list archive)
State New
Series KVM: selftests: Improve PMU event filter settings and add test cases

Commit Message

Jinrong Liang July 17, 2023, 6:23 a.m. UTC
From: Jinrong Liang <cloudliang@tencent.com>

Add a custom "__kvm_pmu_event_filter" structure to improve PMU event
filter settings. Keeping all of the filter parameters in one on-stack
structure simplifies event filter setup and drops the per-filter heap
allocation that the old helpers required.

Signed-off-by: Jinrong Liang <cloudliang@tencent.com>
---
 .../kvm/x86_64/pmu_event_filter_test.c        | 179 +++++++++---------
 1 file changed, 87 insertions(+), 92 deletions(-)
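
In practice the change boils down to replacing the malloc()-based
filter helpers with on-stack copies of a static base filter. A minimal
sketch of the new pattern, using names taken from the diff below:

	/* Copy the template; no alloc_pmu_event_filter()/free() pair needed. */
	struct __kvm_pmu_event_filter f = base_event_filter;

	/* Override only the fields a given test cares about. */
	f.action = KVM_PMU_EVENT_DENY;
	test_with_filter(vcpu, &f);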

Comments

Isaku Yamahata July 19, 2023, 12:02 a.m. UTC | #1
On Mon, Jul 17, 2023 at 02:23:40PM +0800,
Jinrong Liang <ljr.kernel@gmail.com> wrote:

> From: Jinrong Liang <cloudliang@tencent.com>
> 
> Add a custom "__kvm_pmu_event_filter" structure to improve PMU event
> filter settings. Keeping all of the filter parameters in one on-stack
> structure simplifies event filter setup and drops the per-filter heap
> allocation that the old helpers required.
> 
> Signed-off-by: Jinrong Liang <cloudliang@tencent.com>
> ---
>  .../kvm/x86_64/pmu_event_filter_test.c        | 179 +++++++++---------
>  1 file changed, 87 insertions(+), 92 deletions(-)
> 
> diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> index 5ac05e64bec9..ffcbbf25b29b 100644
> --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
> @@ -28,6 +28,10 @@
>  
>  #define NUM_BRANCHES 42
>  
> +/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
> +#define MAX_FILTER_EVENTS		300
> +#define MAX_TEST_EVENTS		10
> +
>  /*
>   * This is how the event selector and unit mask are stored in an AMD
>   * core performance event-select register. Intel's format is similar,
> @@ -69,21 +73,33 @@
>  
>  #define INST_RETIRED EVENT(0xc0, 0)
>  
> +struct __kvm_pmu_event_filter {
> +	__u32 action;
> +	__u32 nevents;
> +	__u32 fixed_counter_bitmap;
> +	__u32 flags;
> +	__u32 pad[4];
> +	__u64 events[MAX_FILTER_EVENTS];
> +};
> +
>  /*
>   * This event list comprises Intel's eight architectural events plus
>   * AMD's "retired branch instructions" for Zen[123] (and possibly
>   * other AMD CPUs).
>   */
> -static const uint64_t event_list[] = {
> -	EVENT(0x3c, 0),
> -	INST_RETIRED,
> -	EVENT(0x3c, 1),
> -	EVENT(0x2e, 0x4f),
> -	EVENT(0x2e, 0x41),
> -	EVENT(0xc4, 0),
> -	EVENT(0xc5, 0),
> -	EVENT(0xa4, 1),
> -	AMD_ZEN_BR_RETIRED,
> +static const struct __kvm_pmu_event_filter base_event_filter = {
> +	.nevents = ARRAY_SIZE(base_event_filter.events),
> +	.events = {
> +		EVENT(0x3c, 0),
> +		INST_RETIRED,
> +		EVENT(0x3c, 1),
> +		EVENT(0x2e, 0x4f),
> +		EVENT(0x2e, 0x41),
> +		EVENT(0xc4, 0),
> +		EVENT(0xc5, 0),
> +		EVENT(0xa4, 1),
> +		AMD_ZEN_BR_RETIRED,
> +	},
>  };
>  
>  struct {
> @@ -225,47 +241,11 @@ static bool sanity_check_pmu(struct kvm_vcpu *vcpu)
>  	return !r;
>  }
>  
> -static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents)
> -{
> -	struct kvm_pmu_event_filter *f;
> -	int size = sizeof(*f) + nevents * sizeof(f->events[0]);
> -
> -	f = malloc(size);
> -	TEST_ASSERT(f, "Out of memory");
> -	memset(f, 0, size);
> -	f->nevents = nevents;
> -	return f;
> -}
> -
> -
> -static struct kvm_pmu_event_filter *
> -create_pmu_event_filter(const uint64_t event_list[], int nevents,
> -			uint32_t action, uint32_t flags)
> -{
> -	struct kvm_pmu_event_filter *f;
> -	int i;
> -
> -	f = alloc_pmu_event_filter(nevents);
> -	f->action = action;
> -	f->flags = flags;
> -	for (i = 0; i < nevents; i++)
> -		f->events[i] = event_list[i];
> -
> -	return f;
> -}
> -
> -static struct kvm_pmu_event_filter *event_filter(uint32_t action)
> -{
> -	return create_pmu_event_filter(event_list,
> -				       ARRAY_SIZE(event_list),
> -				       action, 0);
> -}
> -
>  /*
>   * Remove the first occurrence of 'event' (if any) from the filter's
>   * event list.
>   */
> -static void remove_event(struct kvm_pmu_event_filter *f, uint64_t event)
> +static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event)
>  {
>  	bool found = false;
>  	int i;
> @@ -313,66 +293,70 @@ static void test_without_filter(struct kvm_vcpu *vcpu)
>  }
>  
>  static void test_with_filter(struct kvm_vcpu *vcpu,
> -			     struct kvm_pmu_event_filter *f)
> +			     struct __kvm_pmu_event_filter *__f)
>  {
> +	struct kvm_pmu_event_filter *f = (void *)__f;
> +
>  	vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
>  	run_vcpu_and_sync_pmc_results(vcpu);
>  }
>  
>  static void test_amd_deny_list(struct kvm_vcpu *vcpu)
>  {
> -	uint64_t event = EVENT(0x1C2, 0);
> -	struct kvm_pmu_event_filter *f;
> +	struct __kvm_pmu_event_filter f = base_event_filter;
>  
> -	f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0);
> -	test_with_filter(vcpu, f);
> -	free(f);
> +	f.action = KVM_PMU_EVENT_DENY;
> +	f.nevents = 1;
> +	f.events[0] = EVENT(0x1C2, 0);
> +	test_with_filter(vcpu, &f);

This overwrites all members.  We can use a designated initializer.
	struct __kvm_pmu_event_filter f = {
                .action = KVM_PMU_EVENT_DENY,
                .nevents = 1,
                .events = {
                        EVENT(0x1C2, 0),
                },
        };

Except for this, looks good to me.
Reviewed-by: Isaku Yamahata <isaku.yamahata@intel.com>

Thanks,

>  
>  	ASSERT_PMC_COUNTING_INSTRUCTIONS();
>  }
>  
>  static void test_member_deny_list(struct kvm_vcpu *vcpu)
>  {
> -	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
> +	struct __kvm_pmu_event_filter f = base_event_filter;
>  
> -	test_with_filter(vcpu, f);
> -	free(f);
> +	f.action = KVM_PMU_EVENT_DENY;
> +	test_with_filter(vcpu, &f);
>  
>  	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
>  }
>  
>  static void test_member_allow_list(struct kvm_vcpu *vcpu)
>  {
> -	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
> +	struct __kvm_pmu_event_filter f = base_event_filter;
>  
> -	test_with_filter(vcpu, f);
> -	free(f);
> +	f.action = KVM_PMU_EVENT_ALLOW;
> +	test_with_filter(vcpu, &f);
>  
>  	ASSERT_PMC_COUNTING_INSTRUCTIONS();
>  }
>  
>  static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
>  {
> -	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
> +	struct __kvm_pmu_event_filter f = base_event_filter;
> +
> +	f.action = KVM_PMU_EVENT_DENY;
>  
> -	remove_event(f, INST_RETIRED);
> -	remove_event(f, INTEL_BR_RETIRED);
> -	remove_event(f, AMD_ZEN_BR_RETIRED);
> -	test_with_filter(vcpu, f);
> -	free(f);
> +	remove_event(&f, INST_RETIRED);
> +	remove_event(&f, INTEL_BR_RETIRED);
> +	remove_event(&f, AMD_ZEN_BR_RETIRED);
> +	test_with_filter(vcpu, &f);
>  
>  	ASSERT_PMC_COUNTING_INSTRUCTIONS();
>  }
>  
>  static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
>  {
> -	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
> +	struct __kvm_pmu_event_filter f = base_event_filter;
>  
> -	remove_event(f, INST_RETIRED);
> -	remove_event(f, INTEL_BR_RETIRED);
> -	remove_event(f, AMD_ZEN_BR_RETIRED);
> -	test_with_filter(vcpu, f);
> -	free(f);
> +	f.action = KVM_PMU_EVENT_ALLOW;
> +
> +	remove_event(&f, INST_RETIRED);
> +	remove_event(&f, INTEL_BR_RETIRED);
> +	remove_event(&f, AMD_ZEN_BR_RETIRED);
> +	test_with_filter(vcpu, &f);
>  
>  	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
>  }
> @@ -567,19 +551,16 @@ static void run_masked_events_test(struct kvm_vcpu *vcpu,
>  				   const uint64_t masked_events[],
>  				   const int nmasked_events)
>  {
> -	struct kvm_pmu_event_filter *f;
> +	struct __kvm_pmu_event_filter f = {
> +		.nevents = nmasked_events,
> +		.action = KVM_PMU_EVENT_ALLOW,
> +		.flags = KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
> +	};
>  
> -	f = create_pmu_event_filter(masked_events, nmasked_events,
> -				    KVM_PMU_EVENT_ALLOW,
> -				    KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
> -	test_with_filter(vcpu, f);
> -	free(f);
> +	memcpy(f.events, masked_events, sizeof(uint64_t) * nmasked_events);
> +	test_with_filter(vcpu, &f);
>  }
>  
> -/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
> -#define MAX_FILTER_EVENTS	300
> -#define MAX_TEST_EVENTS		10
> -
>  #define ALLOW_LOADS		BIT(0)
>  #define ALLOW_STORES		BIT(1)
>  #define ALLOW_LOADS_STORES	BIT(2)
> @@ -751,17 +732,27 @@ static void test_masked_events(struct kvm_vcpu *vcpu)
>  	run_masked_events_tests(vcpu, events, nevents);
>  }
>  
> -static int run_filter_test(struct kvm_vcpu *vcpu, const uint64_t *events,
> -			   int nevents, uint32_t flags)
> +static int do_vcpu_set_pmu_event_filter(struct kvm_vcpu *vcpu,
> +					struct __kvm_pmu_event_filter *__f)
>  {
> -	struct kvm_pmu_event_filter *f;
> -	int r;
> +	struct kvm_pmu_event_filter *f = (void *)__f;
>  
> -	f = create_pmu_event_filter(events, nevents, KVM_PMU_EVENT_ALLOW, flags);
> -	r = __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
> -	free(f);
> +	return __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
> +}
> +
> +static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event,
> +				       uint32_t flags, uint32_t action)
> +{
> +	struct __kvm_pmu_event_filter f = {
> +		.nevents = 1,
> +		.flags = flags,
> +		.action = action,
> +		.events = {
> +			event,
> +		},
> +	};
>  
> -	return r;
> +	return do_vcpu_set_pmu_event_filter(vcpu, &f);
>  }
>  
>  static void test_filter_ioctl(struct kvm_vcpu *vcpu)
> @@ -773,14 +764,18 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
>  	 * Unfortunately having invalid bits set in event data is expected to
>  	 * pass when flags == 0 (bits other than eventsel+umask).
>  	 */
> -	r = run_filter_test(vcpu, &e, 1, 0);
> +	r = set_pmu_single_event_filter(vcpu, e, 0, KVM_PMU_EVENT_ALLOW);
>  	TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
>  
> -	r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
> +	r = set_pmu_single_event_filter(vcpu, e,
> +					KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
> +					KVM_PMU_EVENT_ALLOW);
>  	TEST_ASSERT(r != 0, "Invalid PMU Event Filter is expected to fail");
>  
>  	e = KVM_PMU_ENCODE_MASKED_ENTRY(0xff, 0xff, 0xff, 0xf);
> -	r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
> +	r = set_pmu_single_event_filter(vcpu, e,
> +					KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
> +					KVM_PMU_EVENT_ALLOW);
>  	TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
>  }
>  
> -- 
> 2.39.3
>
Jinrong Liang July 19, 2023, 3:26 a.m. UTC | #2
Isaku Yamahata <isaku.yamahata@gmail.com> wrote on Wed, Jul 19, 2023 at 08:02:
>
> On Mon, Jul 17, 2023 at 02:23:40PM +0800,
> Jinrong Liang <ljr.kernel@gmail.com> wrote:
>
> > [...]
> >
> >  static void test_amd_deny_list(struct kvm_vcpu *vcpu)
> >  {
> > -     uint64_t event = EVENT(0x1C2, 0);
> > -     struct kvm_pmu_event_filter *f;
> > +     struct __kvm_pmu_event_filter f = base_event_filter;
> >
> > -     f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0);
> > -     test_with_filter(vcpu, f);
> > -     free(f);
> > +     f.action = KVM_PMU_EVENT_DENY;
> > +     f.nevents = 1;
> > +     f.events[0] = EVENT(0x1C2, 0);
> > +     test_with_filter(vcpu, &f);
>
> This overwrites all members.  We can use a designated initializer.
>         struct __kvm_pmu_event_filter f = {
>                 .action = KVM_PMU_EVENT_DENY,
>                 .nevents = 1,
>                 .events = {
>                         EVENT(0x1C2, 0),
>                 },
>         };

LGTM.

>
> Except for this, looks good to me.
> Reviewed-by: Isaku Yamahata <isaku.yamahata@intel.com>
>
> Thanks,

Patch

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 5ac05e64bec9..ffcbbf25b29b 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -28,6 +28,10 @@ 
 
 #define NUM_BRANCHES 42
 
+/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
+#define MAX_FILTER_EVENTS		300
+#define MAX_TEST_EVENTS		10
+
 /*
  * This is how the event selector and unit mask are stored in an AMD
  * core performance event-select register. Intel's format is similar,
@@ -69,21 +73,33 @@ 
 
 #define INST_RETIRED EVENT(0xc0, 0)
 
+struct __kvm_pmu_event_filter {
+	__u32 action;
+	__u32 nevents;
+	__u32 fixed_counter_bitmap;
+	__u32 flags;
+	__u32 pad[4];
+	__u64 events[MAX_FILTER_EVENTS];
+};
+
 /*
  * This event list comprises Intel's eight architectural events plus
  * AMD's "retired branch instructions" for Zen[123] (and possibly
  * other AMD CPUs).
  */
-static const uint64_t event_list[] = {
-	EVENT(0x3c, 0),
-	INST_RETIRED,
-	EVENT(0x3c, 1),
-	EVENT(0x2e, 0x4f),
-	EVENT(0x2e, 0x41),
-	EVENT(0xc4, 0),
-	EVENT(0xc5, 0),
-	EVENT(0xa4, 1),
-	AMD_ZEN_BR_RETIRED,
+static const struct __kvm_pmu_event_filter base_event_filter = {
+	.nevents = ARRAY_SIZE(base_event_filter.events),
+	.events = {
+		EVENT(0x3c, 0),
+		INST_RETIRED,
+		EVENT(0x3c, 1),
+		EVENT(0x2e, 0x4f),
+		EVENT(0x2e, 0x41),
+		EVENT(0xc4, 0),
+		EVENT(0xc5, 0),
+		EVENT(0xa4, 1),
+		AMD_ZEN_BR_RETIRED,
+	},
 };
 
 struct {
@@ -225,47 +241,11 @@ static bool sanity_check_pmu(struct kvm_vcpu *vcpu)
 	return !r;
 }
 
-static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents)
-{
-	struct kvm_pmu_event_filter *f;
-	int size = sizeof(*f) + nevents * sizeof(f->events[0]);
-
-	f = malloc(size);
-	TEST_ASSERT(f, "Out of memory");
-	memset(f, 0, size);
-	f->nevents = nevents;
-	return f;
-}
-
-
-static struct kvm_pmu_event_filter *
-create_pmu_event_filter(const uint64_t event_list[], int nevents,
-			uint32_t action, uint32_t flags)
-{
-	struct kvm_pmu_event_filter *f;
-	int i;
-
-	f = alloc_pmu_event_filter(nevents);
-	f->action = action;
-	f->flags = flags;
-	for (i = 0; i < nevents; i++)
-		f->events[i] = event_list[i];
-
-	return f;
-}
-
-static struct kvm_pmu_event_filter *event_filter(uint32_t action)
-{
-	return create_pmu_event_filter(event_list,
-				       ARRAY_SIZE(event_list),
-				       action, 0);
-}
-
 /*
  * Remove the first occurrence of 'event' (if any) from the filter's
  * event list.
  */
-static void remove_event(struct kvm_pmu_event_filter *f, uint64_t event)
+static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event)
 {
 	bool found = false;
 	int i;
@@ -313,66 +293,70 @@ static void test_without_filter(struct kvm_vcpu *vcpu)
 }
 
 static void test_with_filter(struct kvm_vcpu *vcpu,
-			     struct kvm_pmu_event_filter *f)
+			     struct __kvm_pmu_event_filter *__f)
 {
+	struct kvm_pmu_event_filter *f = (void *)__f;
+
 	vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
 	run_vcpu_and_sync_pmc_results(vcpu);
 }
 
 static void test_amd_deny_list(struct kvm_vcpu *vcpu)
 {
-	uint64_t event = EVENT(0x1C2, 0);
-	struct kvm_pmu_event_filter *f;
+	struct __kvm_pmu_event_filter f = base_event_filter;
 
-	f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0);
-	test_with_filter(vcpu, f);
-	free(f);
+	f.action = KVM_PMU_EVENT_DENY;
+	f.nevents = 1;
+	f.events[0] = EVENT(0x1C2, 0);
+	test_with_filter(vcpu, &f);
 
 	ASSERT_PMC_COUNTING_INSTRUCTIONS();
 }
 
 static void test_member_deny_list(struct kvm_vcpu *vcpu)
 {
-	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
+	struct __kvm_pmu_event_filter f = base_event_filter;
 
-	test_with_filter(vcpu, f);
-	free(f);
+	f.action = KVM_PMU_EVENT_DENY;
+	test_with_filter(vcpu, &f);
 
 	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
 }
 
 static void test_member_allow_list(struct kvm_vcpu *vcpu)
 {
-	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
+	struct __kvm_pmu_event_filter f = base_event_filter;
 
-	test_with_filter(vcpu, f);
-	free(f);
+	f.action = KVM_PMU_EVENT_ALLOW;
+	test_with_filter(vcpu, &f);
 
 	ASSERT_PMC_COUNTING_INSTRUCTIONS();
 }
 
 static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
 {
-	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
+	struct __kvm_pmu_event_filter f = base_event_filter;
+
+	f.action = KVM_PMU_EVENT_DENY;
 
-	remove_event(f, INST_RETIRED);
-	remove_event(f, INTEL_BR_RETIRED);
-	remove_event(f, AMD_ZEN_BR_RETIRED);
-	test_with_filter(vcpu, f);
-	free(f);
+	remove_event(&f, INST_RETIRED);
+	remove_event(&f, INTEL_BR_RETIRED);
+	remove_event(&f, AMD_ZEN_BR_RETIRED);
+	test_with_filter(vcpu, &f);
 
 	ASSERT_PMC_COUNTING_INSTRUCTIONS();
 }
 
 static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
 {
-	struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
+	struct __kvm_pmu_event_filter f = base_event_filter;
 
-	remove_event(f, INST_RETIRED);
-	remove_event(f, INTEL_BR_RETIRED);
-	remove_event(f, AMD_ZEN_BR_RETIRED);
-	test_with_filter(vcpu, f);
-	free(f);
+	f.action = KVM_PMU_EVENT_ALLOW;
+
+	remove_event(&f, INST_RETIRED);
+	remove_event(&f, INTEL_BR_RETIRED);
+	remove_event(&f, AMD_ZEN_BR_RETIRED);
+	test_with_filter(vcpu, &f);
 
 	ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
 }
@@ -567,19 +551,16 @@ static void run_masked_events_test(struct kvm_vcpu *vcpu,
 				   const uint64_t masked_events[],
 				   const int nmasked_events)
 {
-	struct kvm_pmu_event_filter *f;
+	struct __kvm_pmu_event_filter f = {
+		.nevents = nmasked_events,
+		.action = KVM_PMU_EVENT_ALLOW,
+		.flags = KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+	};
 
-	f = create_pmu_event_filter(masked_events, nmasked_events,
-				    KVM_PMU_EVENT_ALLOW,
-				    KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
-	test_with_filter(vcpu, f);
-	free(f);
+	memcpy(f.events, masked_events, sizeof(uint64_t) * nmasked_events);
+	test_with_filter(vcpu, &f);
 }
 
-/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
-#define MAX_FILTER_EVENTS	300
-#define MAX_TEST_EVENTS		10
-
 #define ALLOW_LOADS		BIT(0)
 #define ALLOW_STORES		BIT(1)
 #define ALLOW_LOADS_STORES	BIT(2)
@@ -751,17 +732,27 @@ static void test_masked_events(struct kvm_vcpu *vcpu)
 	run_masked_events_tests(vcpu, events, nevents);
 }
 
-static int run_filter_test(struct kvm_vcpu *vcpu, const uint64_t *events,
-			   int nevents, uint32_t flags)
+static int do_vcpu_set_pmu_event_filter(struct kvm_vcpu *vcpu,
+					struct __kvm_pmu_event_filter *__f)
 {
-	struct kvm_pmu_event_filter *f;
-	int r;
+	struct kvm_pmu_event_filter *f = (void *)__f;
 
-	f = create_pmu_event_filter(events, nevents, KVM_PMU_EVENT_ALLOW, flags);
-	r = __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
-	free(f);
+	return __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
+}
+
+static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event,
+				       uint32_t flags, uint32_t action)
+{
+	struct __kvm_pmu_event_filter f = {
+		.nevents = 1,
+		.flags = flags,
+		.action = action,
+		.events = {
+			event,
+		},
+	};
 
-	return r;
+	return do_vcpu_set_pmu_event_filter(vcpu, &f);
 }
 
 static void test_filter_ioctl(struct kvm_vcpu *vcpu)
@@ -773,14 +764,18 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
 	 * Unfortunately having invalid bits set in event data is expected to
 	 * pass when flags == 0 (bits other than eventsel+umask).
 	 */
-	r = run_filter_test(vcpu, &e, 1, 0);
+	r = set_pmu_single_event_filter(vcpu, e, 0, KVM_PMU_EVENT_ALLOW);
 	TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
 
-	r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
+	r = set_pmu_single_event_filter(vcpu, e,
+					KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+					KVM_PMU_EVENT_ALLOW);
 	TEST_ASSERT(r != 0, "Invalid PMU Event Filter is expected to fail");
 
 	e = KVM_PMU_ENCODE_MASKED_ENTRY(0xff, 0xff, 0xff, 0xf);
-	r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
+	r = set_pmu_single_event_filter(vcpu, e,
+					KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+					KVM_PMU_EVENT_ALLOW);
 	TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
 }
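
The (void *)__f casts in test_with_filter() and
do_vcpu_set_pmu_event_filter() rely on struct __kvm_pmu_event_filter
matching the layout of the uAPI struct kvm_pmu_event_filter, whose
events[] is a flexible array member. A hypothetical compile-time guard
for that assumption (not part of the patch itself) might look like:

	#include <assert.h>
	#include <stddef.h>

	/* The casts above are only safe while the two layouts stay in sync. */
	static_assert(offsetof(struct __kvm_pmu_event_filter, events) ==
		      offsetof(struct kvm_pmu_event_filter, events),
		      "PMU event filter layouts must match");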