[6/9] KVM: arm64: pmu: Use generated define for PMSELR_EL0.SEL access

Message ID 20240607-arm-pmu-3-9-icntr-v1-6-c7bd2dceff3b@kernel.org (mailing list archive)
State New, archived
Series arm64: Add support for Armv9.4 PMU fixed instruction counter

Commit Message

Rob Herring (Arm) June 7, 2024, 8:31 p.m. UTC
ARMV8_PMU_COUNTER_MASK is really a mask for the PMSELR_EL0.SEL register
field. Make that clear by adding a standard sysreg definition for the
register, and using it instead.

Signed-off-by: Rob Herring (Arm) <robh@kernel.org>
---
 arch/arm64/include/asm/sysreg.h |  1 -
 arch/arm64/kvm/sys_regs.c       | 10 +++++-----
 arch/arm64/tools/sysreg         |  5 +++++
 include/linux/perf/arm_pmuv3.h  |  1 -
 4 files changed, 10 insertions(+), 7 deletions(-)
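
For reference, the new sysreg table entry should make the generation tooling emit definitions roughly like the ones below. This is a sketch of the expected output (the PMSELR_EL0_SEL_* names follow the usual generated-header convention), not something taken from the patch itself:

/* Sketch of the expected generated definitions for PMSELR_EL0 (illustrative only) */
#define SYS_PMSELR_EL0		sys_reg(3, 3, 9, 12, 5)	/* same encoding as the removed hand-written define */
#define PMSELR_EL0_RES0		(GENMASK_ULL(63, 5))
#define PMSELR_EL0_SEL_MASK	GENMASK(4, 0)
#define PMSELR_EL0_SEL_SHIFT	0
#define PMSELR_EL0_SEL_WIDTH	5

SYS_FIELD_GET(PMSELR_EL0, SEL, val) then reduces to FIELD_GET(PMSELR_EL0_SEL_MASK, val), which is what the converted KVM code relies on.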

Comments

Mark Rutland June 10, 2024, 11:10 a.m. UTC | #1
On Fri, Jun 07, 2024 at 02:31:31PM -0600, Rob Herring (Arm) wrote:
> ARMV8_PMU_COUNTER_MASK is really a mask for the PMSELR_EL0.SEL register
> field. Make that clear by adding a standard sysreg definition for the
> register, and using it instead.
> 
> Signed-off-by: Rob Herring (Arm) <robh@kernel.org>
> ---
>  arch/arm64/include/asm/sysreg.h |  1 -
>  arch/arm64/kvm/sys_regs.c       | 10 +++++-----
>  arch/arm64/tools/sysreg         |  5 +++++
>  include/linux/perf/arm_pmuv3.h  |  1 -
>  4 files changed, 10 insertions(+), 7 deletions(-)

This looks good to me; I checked the reg values match those in the
latest ARM ARM (ARM DDI 0487K.a pages 9016 to 9018), and they also match
the existing values in the kernel. The changes to use the new mask name
and to use SYS_FIELD_GET() all look good to me.
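
A quick way to see that the replacement is value-preserving (a sketch only; the
PMSELR_EL0_SEL_MASK name assumes the usual generated-header convention):

	/* Needs <linux/build_bug.h> for static_assert() and <linux/bits.h> for GENMASK().
	 * Old mask: ARMV8_PMU_MAX_COUNTERS - 1 = 32 - 1 = 0x1f.
	 * New mask: PMSELR_EL0.SEL covers bits [4:0], i.e. GENMASK(4, 0) = 0x1f. */
	static_assert((ARMV8_PMU_MAX_COUNTERS - 1) == 0x1f);
	static_assert(PMSELR_EL0_SEL_MASK == GENMASK(4, 0));

so the masking behaviour in sys_regs.c is unchanged by the rename.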

Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>

Mark.

> 
> diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
> index af3b206fa423..b0d6c33f9ecc 100644
> --- a/arch/arm64/include/asm/sysreg.h
> +++ b/arch/arm64/include/asm/sysreg.h
> @@ -403,7 +403,6 @@
>  #define SYS_PMCNTENCLR_EL0		sys_reg(3, 3, 9, 12, 2)
>  #define SYS_PMOVSCLR_EL0		sys_reg(3, 3, 9, 12, 3)
>  #define SYS_PMSWINC_EL0			sys_reg(3, 3, 9, 12, 4)
> -#define SYS_PMSELR_EL0			sys_reg(3, 3, 9, 12, 5)
>  #define SYS_PMCEID0_EL0			sys_reg(3, 3, 9, 12, 6)
>  #define SYS_PMCEID1_EL0			sys_reg(3, 3, 9, 12, 7)
>  #define SYS_PMCCNTR_EL0			sys_reg(3, 3, 9, 13, 0)
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index 22b45a15d068..f8b5db48ea8a 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -880,7 +880,7 @@ static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
>  static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
>  {
>  	reset_unknown(vcpu, r);
> -	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
> +	__vcpu_sys_reg(vcpu, r->reg) &= PMSELR_EL0_SEL_MASK;
>  
>  	return __vcpu_sys_reg(vcpu, r->reg);
>  }
> @@ -972,7 +972,7 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
>  	else
>  		/* return PMSELR.SEL field */
>  		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
> -			    & ARMV8_PMU_COUNTER_MASK;
> +			    & PMSELR_EL0_SEL_MASK;
>  
>  	return true;
>  }
> @@ -1040,8 +1040,8 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
>  			if (pmu_access_event_counter_el0_disabled(vcpu))
>  				return false;
>  
> -			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
> -			      & ARMV8_PMU_COUNTER_MASK;
> +			idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
> +					    __vcpu_sys_reg(vcpu, PMSELR_EL0));
>  		} else if (r->Op2 == 0) {
>  			/* PMCCNTR_EL0 */
>  			if (pmu_access_cycle_counter_el0_disabled(vcpu))
> @@ -1091,7 +1091,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
>  
>  	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
>  		/* PMXEVTYPER_EL0 */
> -		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
> +		idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0));
>  		reg = PMEVTYPER0_EL0 + idx;
>  	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
>  		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
> diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
> index a4c1dd4741a4..231817a379b5 100644
> --- a/arch/arm64/tools/sysreg
> +++ b/arch/arm64/tools/sysreg
> @@ -2153,6 +2153,11 @@ Field	4	P
>  Field	3:0	ALIGN
>  EndSysreg
>  
> +Sysreg	PMSELR_EL0	3	3	9	12	5
> +Res0	63:5
> +Field	4:0	SEL
> +EndSysreg
> +
>  SysregFields	CONTEXTIDR_ELx
>  Res0	63:32
>  Field	31:0	PROCID
> diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h
> index 46377e134d67..caa09241ad4f 100644
> --- a/include/linux/perf/arm_pmuv3.h
> +++ b/include/linux/perf/arm_pmuv3.h
> @@ -7,7 +7,6 @@
>  #define __PERF_ARM_PMUV3_H
>  
>  #define ARMV8_PMU_MAX_COUNTERS	32
> -#define ARMV8_PMU_COUNTER_MASK	(ARMV8_PMU_MAX_COUNTERS - 1)
>  
>  /*
>   * Common architectural and microarchitectural event numbers.
> 
> -- 
> 2.43.0
>

Patch

diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index af3b206fa423..b0d6c33f9ecc 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -403,7 +403,6 @@ 
 #define SYS_PMCNTENCLR_EL0		sys_reg(3, 3, 9, 12, 2)
 #define SYS_PMOVSCLR_EL0		sys_reg(3, 3, 9, 12, 3)
 #define SYS_PMSWINC_EL0			sys_reg(3, 3, 9, 12, 4)
-#define SYS_PMSELR_EL0			sys_reg(3, 3, 9, 12, 5)
 #define SYS_PMCEID0_EL0			sys_reg(3, 3, 9, 12, 6)
 #define SYS_PMCEID1_EL0			sys_reg(3, 3, 9, 12, 7)
 #define SYS_PMCCNTR_EL0			sys_reg(3, 3, 9, 13, 0)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 22b45a15d068..f8b5db48ea8a 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -880,7 +880,7 @@  static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
 	reset_unknown(vcpu, r);
-	__vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
+	__vcpu_sys_reg(vcpu, r->reg) &= PMSELR_EL0_SEL_MASK;
 
 	return __vcpu_sys_reg(vcpu, r->reg);
 }
@@ -972,7 +972,7 @@  static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	else
 		/* return PMSELR.SEL field */
 		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
-			    & ARMV8_PMU_COUNTER_MASK;
+			    & PMSELR_EL0_SEL_MASK;
 
 	return true;
 }
@@ -1040,8 +1040,8 @@  static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
 			if (pmu_access_event_counter_el0_disabled(vcpu))
 				return false;
 
-			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
-			      & ARMV8_PMU_COUNTER_MASK;
+			idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
+					    __vcpu_sys_reg(vcpu, PMSELR_EL0));
 		} else if (r->Op2 == 0) {
 			/* PMCCNTR_EL0 */
 			if (pmu_access_cycle_counter_el0_disabled(vcpu))
@@ -1091,7 +1091,7 @@  static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
 	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
 		/* PMXEVTYPER_EL0 */
-		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
+		idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0));
 		reg = PMEVTYPER0_EL0 + idx;
 	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
 		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index a4c1dd4741a4..231817a379b5 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -2153,6 +2153,11 @@  Field	4	P
 Field	3:0	ALIGN
 EndSysreg
 
+Sysreg	PMSELR_EL0	3	3	9	12	5
+Res0	63:5
+Field	4:0	SEL
+EndSysreg
+
 SysregFields	CONTEXTIDR_ELx
 Res0	63:32
 Field	31:0	PROCID
diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h
index 46377e134d67..caa09241ad4f 100644
--- a/include/linux/perf/arm_pmuv3.h
+++ b/include/linux/perf/arm_pmuv3.h
@@ -7,7 +7,6 @@ 
 #define __PERF_ARM_PMUV3_H
 
 #define ARMV8_PMU_MAX_COUNTERS	32
-#define ARMV8_PMU_COUNTER_MASK	(ARMV8_PMU_MAX_COUNTERS - 1)
 
 /*
  * Common architectural and microarchitectural event numbers.