
[1/4] KVM: x86/pmu: Use enums instead of hardcoded magic for arch event indices

Message ID 20230607010206.1425277-2-seanjc@google.com (mailing list archive)
State New, archived
Series KVM: x86/pmu: Clean up arch/hw event handling

Commit Message

Sean Christopherson June 7, 2023, 1:02 a.m. UTC
Add "enum intel_pmu_architectural_events" to replace the magic numbers for
the (pseudo-)architectural events, and to give a meaningful name to each
event so that new readers don't need psychic powers to understand what the
code is doing.

Cc: Aaron Lewis <aaronlewis@google.com>
Cc: Like Xu <like.xu.linux@gmail.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/vmx/pmu_intel.c | 55 ++++++++++++++++++++++++++++--------
 1 file changed, 43 insertions(+), 12 deletions(-)
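
A note on the CPUID tie-in the new enum comment refers to: per the Intel SDM, CPUID.0AH reports EBX as a bit vector whose valid length is given by EAX[31:24], and a set bit i means the architectural event with index i is NOT available. The sketch below is illustrative userspace code, not part of the patch (arch_event_supported() is a made-up helper), roughly mirroring how KVM derives pmu->available_event_types from the guest's CPUID:

#include <cpuid.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: query CPUID.0AH and test the EBX "event not
 * available" bit for a given architectural event index. */
static bool arch_event_supported(unsigned int idx)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx))
		return false;

	/* EAX[31:24] gives the length of the EBX bit vector; events
	 * beyond the enumerated length are treated as unavailable. */
	if (idx >= (eax >> 24) || idx >= 32)
		return false;

	/* A set bit means the event is NOT available. */
	return !(ebx & (1u << idx));
}

int main(void)
{
	/* Index 0 is INTEL_ARCH_CPU_CYCLES in the patch's enum. */
	printf("core cycles event %ssupported\n",
	       arch_event_supported(0) ? "" : "un");
	return 0;
}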

Comments

Like Xu July 10, 2023, 10:50 a.m. UTC | #1
On 7/6/2023 9:02 am, Sean Christopherson wrote:
> Add "enum intel_pmu_architectural_events" to replace the magic numbers for
> the (pseudo-)architectural events, and to give a meaningful name to each
> event so that new readers don't need psychic powers to understand what the
> code is doing.
> 
> Cc: Aaron Lewis <aaronlewis@google.com>
> Cc: Like Xu <like.xu.linux@gmail.com>
> Signed-off-by: Sean Christopherson <seanjc@google.com>

Reviewed-by: Like Xu <likexu@tencent.com>
// I should have done it. Thanks.

> ---
>   arch/x86/kvm/vmx/pmu_intel.c | 55 ++++++++++++++++++++++++++++--------
>   1 file changed, 43 insertions(+), 12 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
> index 84be32d9f365..0050d71c9c01 100644
> --- a/arch/x86/kvm/vmx/pmu_intel.c
> +++ b/arch/x86/kvm/vmx/pmu_intel.c
> @@ -22,23 +22,51 @@
>   
>   #define MSR_PMC_FULL_WIDTH_BIT      (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
>   
> +enum intel_pmu_architectural_events {
> +	/*
> +	 * The order of the architectural events matters as support for each
> +	 * event is enumerated via CPUID using the index of the event.
> +	 */
> +	INTEL_ARCH_CPU_CYCLES,
> +	INTEL_ARCH_INSTRUCTIONS_RETIRED,
> +	INTEL_ARCH_REFERENCE_CYCLES,
> +	INTEL_ARCH_LLC_REFERENCES,
> +	INTEL_ARCH_LLC_MISSES,
> +	INTEL_ARCH_BRANCHES_RETIRED,
> +	INTEL_ARCH_BRANCHES_MISPREDICTED,
> +
> +	NR_REAL_INTEL_ARCH_EVENTS,
> +
> +	/*
> +	 * Pseudo-architectural event used to implement IA32_FIXED_CTR2, a.k.a.
> +	 * TSC reference cycles.  The architectural reference cycles event may
> +	 * or may not actually use the TSC as the reference, e.g. might use the
> +	 * core crystal clock or the bus clock (yeah, "architectural").
> +	 */
> +	PSEUDO_ARCH_REFERENCE_CYCLES = NR_REAL_INTEL_ARCH_EVENTS,
> +	NR_INTEL_ARCH_EVENTS,
> +};
> +
>   static struct {
>   	u8 eventsel;
>   	u8 unit_mask;
>   } const intel_arch_events[] = {
> -	[0] = { 0x3c, 0x00 },
> -	[1] = { 0xc0, 0x00 },
> -	[2] = { 0x3c, 0x01 },
> -	[3] = { 0x2e, 0x4f },
> -	[4] = { 0x2e, 0x41 },
> -	[5] = { 0xc4, 0x00 },
> -	[6] = { 0xc5, 0x00 },
> -	/* The above index must match CPUID 0x0A.EBX bit vector */
> -	[7] = { 0x00, 0x03 },
> +	[INTEL_ARCH_CPU_CYCLES]			= { 0x3c, 0x00 },
> +	[INTEL_ARCH_INSTRUCTIONS_RETIRED]	= { 0xc0, 0x00 },
> +	[INTEL_ARCH_REFERENCE_CYCLES]		= { 0x3c, 0x01 },
> +	[INTEL_ARCH_LLC_REFERENCES]		= { 0x2e, 0x4f },
> +	[INTEL_ARCH_LLC_MISSES]			= { 0x2e, 0x41 },
> +	[INTEL_ARCH_BRANCHES_RETIRED]		= { 0xc4, 0x00 },
> +	[INTEL_ARCH_BRANCHES_MISPREDICTED]	= { 0xc5, 0x00 },
> +	[PSEUDO_ARCH_REFERENCE_CYCLES]		= { 0x00, 0x03 },
>   };
>   
>   /* mapping between fixed pmc index and intel_arch_events array */
> -static int fixed_pmc_events[] = {1, 0, 7};
> +static int fixed_pmc_events[] = {
> +	[0] = INTEL_ARCH_INSTRUCTIONS_RETIRED,
> +	[1] = INTEL_ARCH_CPU_CYCLES,
> +	[2] = PSEUDO_ARCH_REFERENCE_CYCLES,
> +};
>   
>   static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
>   {
> @@ -92,13 +120,16 @@ static bool intel_hw_event_available(struct kvm_pmc *pmc)
>   	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
>   	int i;
>   
> -	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) {
> +	BUILD_BUG_ON(ARRAY_SIZE(intel_arch_events) != NR_INTEL_ARCH_EVENTS);
> +
> +	for (i = 0; i < NR_INTEL_ARCH_EVENTS; i++) {
>   		if (intel_arch_events[i].eventsel != event_select ||
>   		    intel_arch_events[i].unit_mask != unit_mask)
>   			continue;
>   
>   		/* disable event that reported as not present by cpuid */
> -		if ((i < 7) && !(pmu->available_event_types & (1 << i)))
> +		if ((i < PSEUDO_ARCH_REFERENCE_CYCLES) &&
> +		    !(pmu->available_event_types & (1 << i)))

CHECK: Unnecessary parentheses around 'i < PSEUDO_ARCH_REFERENCE_CYCLES'
#164: FILE: arch/x86/kvm/vmx/pmu_intel.c:131:
+		if ((i < PSEUDO_ARCH_REFERENCE_CYCLES) &&
+		    !(pmu->available_event_types & (1 << i)))

>   			return false;
>   
>   		break;
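
For reference, the parenthesis-free form checkpatch is nudging toward would simply be (same logic, style only):

		if (i < PSEUDO_ARCH_REFERENCE_CYCLES &&
		    !(pmu->available_event_types & (1 << i)))
			return false;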

Patch

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 84be32d9f365..0050d71c9c01 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -22,23 +22,51 @@
 
 #define MSR_PMC_FULL_WIDTH_BIT      (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
 
+enum intel_pmu_architectural_events {
+	/*
+	 * The order of the architectural events matters as support for each
+	 * event is enumerated via CPUID using the index of the event.
+	 */
+	INTEL_ARCH_CPU_CYCLES,
+	INTEL_ARCH_INSTRUCTIONS_RETIRED,
+	INTEL_ARCH_REFERENCE_CYCLES,
+	INTEL_ARCH_LLC_REFERENCES,
+	INTEL_ARCH_LLC_MISSES,
+	INTEL_ARCH_BRANCHES_RETIRED,
+	INTEL_ARCH_BRANCHES_MISPREDICTED,
+
+	NR_REAL_INTEL_ARCH_EVENTS,
+
+	/*
+	 * Pseudo-architectural event used to implement IA32_FIXED_CTR2, a.k.a.
+	 * TSC reference cycles.  The architectural reference cycles event may
+	 * or may not actually use the TSC as the reference, e.g. might use the
+	 * core crystal clock or the bus clock (yeah, "architectural").
+	 */
+	PSEUDO_ARCH_REFERENCE_CYCLES = NR_REAL_INTEL_ARCH_EVENTS,
+	NR_INTEL_ARCH_EVENTS,
+};
+
 static struct {
 	u8 eventsel;
 	u8 unit_mask;
 } const intel_arch_events[] = {
-	[0] = { 0x3c, 0x00 },
-	[1] = { 0xc0, 0x00 },
-	[2] = { 0x3c, 0x01 },
-	[3] = { 0x2e, 0x4f },
-	[4] = { 0x2e, 0x41 },
-	[5] = { 0xc4, 0x00 },
-	[6] = { 0xc5, 0x00 },
-	/* The above index must match CPUID 0x0A.EBX bit vector */
-	[7] = { 0x00, 0x03 },
+	[INTEL_ARCH_CPU_CYCLES]			= { 0x3c, 0x00 },
+	[INTEL_ARCH_INSTRUCTIONS_RETIRED]	= { 0xc0, 0x00 },
+	[INTEL_ARCH_REFERENCE_CYCLES]		= { 0x3c, 0x01 },
+	[INTEL_ARCH_LLC_REFERENCES]		= { 0x2e, 0x4f },
+	[INTEL_ARCH_LLC_MISSES]			= { 0x2e, 0x41 },
+	[INTEL_ARCH_BRANCHES_RETIRED]		= { 0xc4, 0x00 },
+	[INTEL_ARCH_BRANCHES_MISPREDICTED]	= { 0xc5, 0x00 },
+	[PSEUDO_ARCH_REFERENCE_CYCLES]		= { 0x00, 0x03 },
 };
 
 /* mapping between fixed pmc index and intel_arch_events array */
-static int fixed_pmc_events[] = {1, 0, 7};
+static int fixed_pmc_events[] = {
+	[0] = INTEL_ARCH_INSTRUCTIONS_RETIRED,
+	[1] = INTEL_ARCH_CPU_CYCLES,
+	[2] = PSEUDO_ARCH_REFERENCE_CYCLES,
+};
 
 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
 {
@@ -92,13 +120,16 @@ static bool intel_hw_event_available(struct kvm_pmc *pmc)
 	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) {
+	BUILD_BUG_ON(ARRAY_SIZE(intel_arch_events) != NR_INTEL_ARCH_EVENTS);
+
+	for (i = 0; i < NR_INTEL_ARCH_EVENTS; i++) {
 		if (intel_arch_events[i].eventsel != event_select ||
 		    intel_arch_events[i].unit_mask != unit_mask)
 			continue;
 
 		/* disable event that reported as not present by cpuid */
-		if ((i < 7) && !(pmu->available_event_types & (1 << i)))
+		if ((i < PSEUDO_ARCH_REFERENCE_CYCLES) &&
+		    !(pmu->available_event_types & (1 << i)))
 			return false;
 
 		break;
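
As a standalone illustration of how the two tables compose, here is a hedged sketch compiled outside the kernel; the table values and index names are copied from the diff, while the printf scaffolding and the trimmed array shape are mine:

#include <stdio.h>

typedef unsigned char u8;

/* Trimmed copy of the patch's table, keeping only the entries the
 * fixed counters reference (indices per the patch's enum). */
static const struct { u8 eventsel; u8 unit_mask; } intel_arch_events[] = {
	[0] = { 0x3c, 0x00 },	/* INTEL_ARCH_CPU_CYCLES */
	[1] = { 0xc0, 0x00 },	/* INTEL_ARCH_INSTRUCTIONS_RETIRED */
	[7] = { 0x00, 0x03 },	/* PSEUDO_ARCH_REFERENCE_CYCLES */
};

/* Fixed PMC index -> intel_arch_events index, as in the patch. */
static const int fixed_pmc_events[] = { 1, 0, 7 };

int main(void)
{
	/* The two-level lookup the patch's comment describes:
	 * fixed counter idx -> arch event idx -> {eventsel, unit_mask}. */
	for (int idx = 0; idx < 3; idx++) {
		int event = fixed_pmc_events[idx];

		printf("IA32_FIXED_CTR%d -> eventsel=0x%02x umask=0x%02x\n",
		       idx, intel_arch_events[event].eventsel,
		       intel_arch_events[event].unit_mask);
	}
	return 0;
}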