[kvm-unit-tests,2/2] x86/pmu: Add AMD Guest PerfMonV2 testcases

Message ID 20220905123946.95223-7-likexu@tencent.com (mailing list archive)
State New, archived

Commit Message

Like Xu Sept. 5, 2022, 12:39 p.m. UTC
From: Like Xu <likexu@tencent.com>

Updated test cases to cover KVM enabling code for AMD Guest PerfMonV2.

The Intel-specific PMU helpers are extended to also check the AMD CPUID, and
MSRs with equivalent semantics are assigned during the initialization phase,
so the vast majority of the pmu test cases can be reused unchanged.

On some x86 machines (AMD only), even for retired events, the count collected
for the same workload varies from run to run. This essentially reflects
details of the hardware implementation; from a software perspective the event
is imprecise, so a tolerance check is added to the counter overflow testcases.

Signed-off-by: Like Xu <likexu@tencent.com>
---
 lib/x86/msr.h       |  5 ++++
 lib/x86/processor.h |  9 ++++++-
 x86/pmu.c           | 61 ++++++++++++++++++++++++++++++++-------------
 3 files changed, 56 insertions(+), 19 deletions(-)

Comments

Sean Christopherson Oct. 5, 2022, 10:08 p.m. UTC | #1
Can you provide a single series for all of the KVM-Unit-Tests PMU work (separate
from the KVM patches)?  Ya, it'll be big and is a blatant violation of "do one
thing", but trying to manually handle the dependencies on the review side is time
consuming.

One thought to help keep track of dependencies between KVM and KUT would be to
add dummy commits between each sub-series, with the dummy commit containing a lore
link to the relevant KVM patches/series.  That would allow throwing everything into
one series without losing track of things.  Hopefully.

On Mon, Sep 05, 2022, Like Xu wrote:
> diff --git a/lib/x86/processor.h b/lib/x86/processor.h
> index 9c490d9..b9592c4 100644
> --- a/lib/x86/processor.h
> +++ b/lib/x86/processor.h
> @@ -796,8 +796,12 @@ static inline void flush_tlb(void)
>  
>  static inline u8 pmu_version(void)
>  {
> -	if (!is_intel())
> +	if (!is_intel()) {
> +		/* Performance Monitoring Version 2 Supported */
> +		if (cpuid(0x80000022).a & 0x1)

Add an X86_FEATURE_*, that way this is self-documenting.
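
For example, something along these lines in lib/x86/processor.h (a sketch; it
assumes the existing CPUID() feature macro, and X86_FEATURE_AMD_PMU_V2 is only
a placeholder name):

	#define X86_FEATURE_AMD_PMU_V2	(CPUID(0x80000022, 0, EAX, 0))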

> +			return 2;
>  		return 0;
> +	}
>  
>  	return cpuid(10).a & 0xff;
>  }
> @@ -824,6 +828,9 @@ static inline u8 pmu_nr_gp_counters(void)
>  {
>  	if (is_intel()) {
>  		return (cpuid(10).a >> 8) & 0xff;
> +	} else if (this_cpu_has_perf_global_ctrl()) {

Eww.  Took me too long to connect the dots to understand how this guarantees that
leaf 0x80000022 is available.  With an X86_FEATURE_* this can simply be:

	} else if (this_cpu_has(X86_FEATURE_AMD_PMU_V2) {

or whatever name is appropriate.

> +		/* Number of Core Performance Counters. */
> +		return cpuid(0x80000022).b & 0xf;
>  	} else if (!has_amd_perfctr_core()) {
>  		return AMD64_NUM_COUNTERS;
>  	}
> diff --git a/x86/pmu.c b/x86/pmu.c
> index 11607c0..6d5363b 100644
> --- a/x86/pmu.c
> +++ b/x86/pmu.c
> @@ -72,6 +72,9 @@ struct pmu_event {
>  #define PMU_CAP_FW_WRITES	(1ULL << 13)
>  static u32 gp_counter_base;
>  static u32 gp_select_base;
> +static u32 global_status_msr;
> +static u32 global_ctl_msr;
> +static u32 global_status_clr_msr;

What do you think about naming these like MSR #defines?  E.g.

  MSR_PERF_GLOBAL_CTRL
  MSR_PERF_GLOBAL_STATUS
  MSR_PERF_GLOBAL_STATUS_CLR

There's a little risk of causing confusion, but I think it would make the code
easier to read.
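
E.g. a sketch of that naming idea (they would remain runtime variables, just
named after the MSR defines):

	static u32 MSR_PERF_GLOBAL_CTRL;
	static u32 MSR_PERF_GLOBAL_STATUS;
	static u32 MSR_PERF_GLOBAL_STATUS_CLR;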

>  static unsigned int gp_events_size;
>  static unsigned int nr_gp_counters;
>  
> @@ -150,8 +153,7 @@ static void global_enable(pmu_counter_t *cnt)
>  		return;
>  
>  	cnt->idx = event_to_global_idx(cnt);
> -	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_CTRL) |
> -			(1ull << cnt->idx));
> +	wrmsr(global_ctl_msr, rdmsr(global_ctl_msr) | (1ull << cnt->idx));

Opportunistically use BIT_ULL().
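
For instance, a sketch of the suggested change (assuming a BIT_ULL() macro is
available from the lib headers):

	wrmsr(global_ctl_msr, rdmsr(global_ctl_msr) | BIT_ULL(cnt->idx));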

>  }
>  
>  static void global_disable(pmu_counter_t *cnt)
> @@ -159,8 +161,7 @@ static void global_disable(pmu_counter_t *cnt)
>  	if (pmu_version() < 2)
>  		return;
>  
> -	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_CTRL) &
> -			~(1ull << cnt->idx));
> +	wrmsr(global_ctl_msr, rdmsr(global_ctl_msr) & ~(1ull << cnt->idx));

BIT_ULL()

>  }
>  
>  static inline uint32_t get_gp_counter_msr(unsigned int i)
> @@ -326,6 +327,23 @@ static void check_counters_many(void)
>  	report(i == n, "all counters");
>  }
>  
> +static bool is_the_count_reproducible(pmu_counter_t *cnt)
> +{
> +	unsigned int i;
> +	uint64_t count;
> +
> +	__measure(cnt, 0);
> +	count = cnt->count;
> +
> +	for (i = 0; i < 10; i++) {
> +		__measure(cnt, 0);
> +		if (count != cnt->count)
> +			return false;
> +	}
> +
> +	return true;
> +}
> +
>  static void check_counter_overflow(void)
>  {
>  	uint64_t count;
> @@ -334,13 +352,14 @@ static void check_counter_overflow(void)
>  		.ctr = gp_counter_base,
>  		.config = EVNTSEL_OS | EVNTSEL_USR | (*gp_events)[1].unit_sel /* instructions */,
>  	};
> +	bool precise_event = is_the_count_reproducible(&cnt);
> +
>  	__measure(&cnt, 0);
>  	count = cnt.count;
>  
>  	/* clear status before test */
>  	if (pmu_version() > 1) {

Please provide helper(s) to replace the myriad open coded "pmu_version() > ???"
checks.  E.g. this one appears to be:

	if (this_cpu_has_perf_global_status_clr())

I obviously don't care about the verbosity, it's that people like me might not
know what the PMU version has to do with an MSR being accessible.
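
A minimal sketch of such a helper, modeled on the existing
this_cpu_has_perf_global_ctrl() (the exact name and placement are up to the
author):

	static inline bool this_cpu_has_perf_global_status_clr(void)
	{
		return pmu_version() > 1;
	}
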
Like Xu Oct. 19, 2022, 9:40 a.m. UTC | #2
All applied, thanks.

On 6/10/2022 6:08 am, Sean Christopherson wrote:
> Can you provide a single series for all of the KVM-Unit-Tests PMU work (separate
> from the KVM patches)?  Ya, it'll be big and is a blatant violation of "do one
> thing", but trying to manually handle the dependencies on the review side is time
> consuming.

Thanks for your time. The PMU test cases will be combined into a single series,
if no new ideas emerge.

> 
> One thought to help keep track of dependencies between KVM and KUT would be to
> add dummy commits between each sub-series, with the dummy commit containing a lore
> link to the relevant KVM patches/series.  That would allow throwing everything into
> one series without losing track of things.  Hopefully.

Sure, adding a lore link to a cover letter is always helpful. It seems that
the ageing KVM project has moved to a test-driven approach to development, and
any new developer should be aware of this rule.

> 
> On Mon, Sep 05, 2022, Like Xu wrote:
>> diff --git a/lib/x86/processor.h b/lib/x86/processor.h
>> index 9c490d9..b9592c4 100644
>> --- a/lib/x86/processor.h
>> +++ b/lib/x86/processor.h
>> @@ -796,8 +796,12 @@ static inline void flush_tlb(void)
>>   
>>   static inline u8 pmu_version(void)
>>   {
>> -	if (!is_intel())
>> +	if (!is_intel()) {
>> +		/* Performance Monitoring Version 2 Supported */
>> +		if (cpuid(0x80000022).a & 0x1)
> 
> Add an X86_FEATURE_*, that way this is self-documenting.
> 
>> +			return 2;
>>   		return 0;
>> +	}
>>   
>>   	return cpuid(10).a & 0xff;
>>   }
>> @@ -824,6 +828,9 @@ static inline u8 pmu_nr_gp_counters(void)
>>   {
>>   	if (is_intel()) {
>>   		return (cpuid(10).a >> 8) & 0xff;
>> +	} else if (this_cpu_has_perf_global_ctrl()) {
> 
> Eww.  Took me too long to connect the dots to understand how this guarantees that
> leaf 0x80000022 is available.  With an X86_FEATURE_* this can simply be:
> 
> 	} else if (this_cpu_has(X86_FEATURE_AMD_PMU_V2) {
> 
> or whatever name is appropriate.
> 
>> +		/* Number of Core Performance Counters. */
>> +		return cpuid(0x80000022).b & 0xf;
>>   	} else if (!has_amd_perfctr_core()) {
>>   		return AMD64_NUM_COUNTERS;
>>   	}
>> diff --git a/x86/pmu.c b/x86/pmu.c
>> index 11607c0..6d5363b 100644
>> --- a/x86/pmu.c
>> +++ b/x86/pmu.c
>> @@ -72,6 +72,9 @@ struct pmu_event {
>>   #define PMU_CAP_FW_WRITES	(1ULL << 13)
>>   static u32 gp_counter_base;
>>   static u32 gp_select_base;
>> +static u32 global_status_msr;
>> +static u32 global_ctl_msr;
>> +static u32 global_status_clr_msr;
> 
> What do you think about naming these like MSR #defines?  E.g.
> 
>    MSR_PERF_GLOBAL_CTRL
>    MSR_PERF_GLOBAL_STATUS
>    MSR_PERF_GLOBAL_STATUS_CLR
> 
> There's a little risk of causing confusion, but I think it would make the code
> easier to read.

Lowercase variable names are applied.

> 
>>   static unsigned int gp_events_size;
>>   static unsigned int nr_gp_counters;
>>   
>> @@ -150,8 +153,7 @@ static void global_enable(pmu_counter_t *cnt)
>>   		return;
>>   
>>   	cnt->idx = event_to_global_idx(cnt);
>> -	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_CTRL) |
>> -			(1ull << cnt->idx));
>> +	wrmsr(global_ctl_msr, rdmsr(global_ctl_msr) | (1ull << cnt->idx));
> 
> Opportunistically use BIT_ULL().
> 
>>   }
>>   
>>   static void global_disable(pmu_counter_t *cnt)
>> @@ -159,8 +161,7 @@ static void global_disable(pmu_counter_t *cnt)
>>   	if (pmu_version() < 2)
>>   		return;
>>   
>> -	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_CTRL) &
>> -			~(1ull << cnt->idx));
>> +	wrmsr(global_ctl_msr, rdmsr(global_ctl_msr) & ~(1ull << cnt->idx));
> 
> BIT_ULL()
> 
>>   }
>>   
>>   static inline uint32_t get_gp_counter_msr(unsigned int i)
>> @@ -326,6 +327,23 @@ static void check_counters_many(void)
>>   	report(i == n, "all counters");
>>   }
>>   
>> +static bool is_the_count_reproducible(pmu_counter_t *cnt)
>> +{
>> +	unsigned int i;
>> +	uint64_t count;
>> +
>> +	__measure(cnt, 0);
>> +	count = cnt->count;
>> +
>> +	for (i = 0; i < 10; i++) {
>> +		__measure(cnt, 0);
>> +		if (count != cnt->count)
>> +			return false;
>> +	}
>> +
>> +	return true;
>> +}
>> +
>>   static void check_counter_overflow(void)
>>   {
>>   	uint64_t count;
>> @@ -334,13 +352,14 @@ static void check_counter_overflow(void)
>>   		.ctr = gp_counter_base,
>>   		.config = EVNTSEL_OS | EVNTSEL_USR | (*gp_events)[1].unit_sel /* instructions */,
>>   	};
>> +	bool precise_event = is_the_count_reproducible(&cnt);
>> +
>>   	__measure(&cnt, 0);
>>   	count = cnt.count;
>>   
>>   	/* clear status before test */
>>   	if (pmu_version() > 1) {
> 
> Please provide helper(s) to replace the myriad open coded "pmu_version() > ???"
> checks.  E.g. this one appears to be:
> 
> 	if (this_cpu_has_perf_global_status_clr())
> 
> I obviously don't care about the verbosity, it's that people like me might not
> know what the PMU version has to do with an MSR being accessible.
Patch

diff --git a/lib/x86/msr.h b/lib/x86/msr.h
index 5f16a58..6f31155 100644
--- a/lib/x86/msr.h
+++ b/lib/x86/msr.h
@@ -419,6 +419,11 @@ 
 #define MSR_CORE_PERF_GLOBAL_CTRL	0x0000038f
 #define MSR_CORE_PERF_GLOBAL_OVF_CTRL	0x00000390
 
+/* AMD Performance Counter Global Status and Control MSRs */
+#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS	0xc0000300
+#define MSR_AMD64_PERF_CNTR_GLOBAL_CTL		0xc0000301
+#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR	0xc0000302
+
 /* Geode defined MSRs */
 #define MSR_GEODE_BUSCONT_CONF0		0x00001900
 
diff --git a/lib/x86/processor.h b/lib/x86/processor.h
index 9c490d9..b9592c4 100644
--- a/lib/x86/processor.h
+++ b/lib/x86/processor.h
@@ -796,8 +796,12 @@  static inline void flush_tlb(void)
 
 static inline u8 pmu_version(void)
 {
-	if (!is_intel())
+	if (!is_intel()) {
+		/* Performance Monitoring Version 2 Supported */
+		if (cpuid(0x80000022).a & 0x1)
+			return 2;
 		return 0;
+	}
 
 	return cpuid(10).a & 0xff;
 }
@@ -824,6 +828,9 @@  static inline u8 pmu_nr_gp_counters(void)
 {
 	if (is_intel()) {
 		return (cpuid(10).a >> 8) & 0xff;
+	} else if (this_cpu_has_perf_global_ctrl()) {
+		/* Number of Core Performance Counters. */
+		return cpuid(0x80000022).b & 0xf;
 	} else if (!has_amd_perfctr_core()) {
 		return AMD64_NUM_COUNTERS;
 	}
diff --git a/x86/pmu.c b/x86/pmu.c
index 11607c0..6d5363b 100644
--- a/x86/pmu.c
+++ b/x86/pmu.c
@@ -72,6 +72,9 @@  struct pmu_event {
 #define PMU_CAP_FW_WRITES	(1ULL << 13)
 static u32 gp_counter_base;
 static u32 gp_select_base;
+static u32 global_status_msr;
+static u32 global_ctl_msr;
+static u32 global_status_clr_msr;
 static unsigned int gp_events_size;
 static unsigned int nr_gp_counters;
 
@@ -150,8 +153,7 @@  static void global_enable(pmu_counter_t *cnt)
 		return;
 
 	cnt->idx = event_to_global_idx(cnt);
-	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_CTRL) |
-			(1ull << cnt->idx));
+	wrmsr(global_ctl_msr, rdmsr(global_ctl_msr) | (1ull << cnt->idx));
 }
 
 static void global_disable(pmu_counter_t *cnt)
@@ -159,8 +161,7 @@  static void global_disable(pmu_counter_t *cnt)
 	if (pmu_version() < 2)
 		return;
 
-	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, rdmsr(MSR_CORE_PERF_GLOBAL_CTRL) &
-			~(1ull << cnt->idx));
+	wrmsr(global_ctl_msr, rdmsr(global_ctl_msr) & ~(1ull << cnt->idx));
 }
 
 static inline uint32_t get_gp_counter_msr(unsigned int i)
@@ -326,6 +327,23 @@  static void check_counters_many(void)
 	report(i == n, "all counters");
 }
 
+static bool is_the_count_reproducible(pmu_counter_t *cnt)
+{
+	unsigned int i;
+	uint64_t count;
+
+	__measure(cnt, 0);
+	count = cnt->count;
+
+	for (i = 0; i < 10; i++) {
+		__measure(cnt, 0);
+		if (count != cnt->count)
+			return false;
+	}
+
+	return true;
+}
+
 static void check_counter_overflow(void)
 {
 	uint64_t count;
@@ -334,13 +352,14 @@  static void check_counter_overflow(void)
 		.ctr = gp_counter_base,
 		.config = EVNTSEL_OS | EVNTSEL_USR | (*gp_events)[1].unit_sel /* instructions */,
 	};
+	bool precise_event = is_the_count_reproducible(&cnt);
+
 	__measure(&cnt, 0);
 	count = cnt.count;
 
 	/* clear status before test */
 	if (pmu_version() > 1) {
-		wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL,
-		      rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));
+		wrmsr(global_status_clr_msr, rdmsr(global_status_msr));
 	}
 
 	report_prefix_push("overflow");
@@ -373,7 +392,7 @@  static void check_counter_overflow(void)
 		__measure(&cnt, cnt.count);
 
 		report(check_irq() == (i % 2), "irq-%d", i);
-		if (pmu_version() > 1)
+		if (precise_event)
 			report(cnt.count == 1, "cntr-%d", i);
 		else
 			report(cnt.count < 4, "cntr-%d", i);
@@ -381,10 +400,10 @@  static void check_counter_overflow(void)
 		if (pmu_version() < 2)
 			continue;
 
-		status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
+		status = rdmsr(global_status_msr);
 		report(status & (1ull << idx), "status-%d", i);
-		wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, status);
-		status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
+		wrmsr(global_status_clr_msr, status);
+		status = rdmsr(global_status_msr);
 		report(!(status & (1ull << idx)), "status clear-%d", i);
 	}
 
@@ -492,8 +511,7 @@  static void check_running_counter_wrmsr(void)
 
 	/* clear status before overflow test */
 	if (pmu_version() > 1) {
-		wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL,
-			rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));
+		wrmsr(global_status_clr_msr, rdmsr(global_status_msr));
 	}
 
 	start_event(&evt);
@@ -508,7 +526,7 @@  static void check_running_counter_wrmsr(void)
 	stop_event(&evt);
 
 	if (pmu_version() > 1) {
-		status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
+		status = rdmsr(global_status_msr);
 		report(status & 1, "status");
 	}
 
@@ -532,8 +550,7 @@  static void check_emulated_instr(void)
 	report_prefix_push("emulated instruction");
 
 	if (pmu_version() > 1) {
-		wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL,
-			rdmsr(MSR_CORE_PERF_GLOBAL_STATUS));
+		wrmsr(global_status_clr_msr, rdmsr(global_status_msr));
 	}
 
 	start_event(&brnch_cnt);
@@ -576,7 +593,7 @@  static void check_emulated_instr(void)
 		: "eax", "ebx", "ecx", "edx");
 
 	if (pmu_version() > 1)
-		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+		wrmsr(global_ctl_msr, 0);
 
 	stop_event(&brnch_cnt);
 	stop_event(&instr_cnt);
@@ -590,7 +607,7 @@  static void check_emulated_instr(void)
 
 	if (pmu_version() > 1) {
 		// Additionally check that those counters overflowed properly.
-		status = rdmsr(MSR_CORE_PERF_GLOBAL_STATUS);
+		status = rdmsr(global_status_msr);
 		report(status & 1, "instruction counter overflow");
 		report(status & 2, "branch counter overflow");
 	}
@@ -679,7 +696,7 @@  static void set_ref_cycle_expectations(void)
 	if (!nr_gp_counters || !pmu_gp_counter_is_available(2))
 		return;
 
-	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+	wrmsr(global_ctl_msr, 0);
 
 	t0 = fenced_rdtsc();
 	start_event(&cnt);
@@ -722,6 +739,10 @@  static bool detect_intel_pmu(void)
 	gp_counter_base = MSR_IA32_PERFCTR0;
 	gp_select_base = MSR_P6_EVNTSEL0;
 
+	global_status_msr = MSR_CORE_PERF_GLOBAL_STATUS;
+	global_ctl_msr = MSR_CORE_PERF_GLOBAL_CTRL;
+	global_status_clr_msr = MSR_CORE_PERF_GLOBAL_OVF_CTRL;
+
 	report_prefix_push("Intel");
 	return true;
 }
@@ -746,6 +767,10 @@  static bool detect_amd_pmu(void)
 	gp_counter_base = MSR_F15H_PERF_CTR0;
 	gp_select_base = MSR_F15H_PERF_CTL0;
 
+	global_status_msr = MSR_AMD64_PERF_CNTR_GLOBAL_STATUS;
+	global_ctl_msr = MSR_AMD64_PERF_CNTR_GLOBAL_CTL;
+	global_status_clr_msr = MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR;
+
 	report_prefix_push("AMD");
 	return true;
 }