[3/9] KVM: arm64: PMU: Only narrow counters that are not 64bit wide

Message ID: 20220805135813.2102034-4-maz@kernel.org
State: New, archived
Series: KVM: arm64: PMU: Fixing chained events, and PMUv3p5 support

Commit Message

Marc Zyngier Aug. 5, 2022, 1:58 p.m. UTC
The current PMU emulation sometimes narrows counters to 32bit
if the counter isn't the cycle counter. As this is going to
change with PMUv3p5 where the counters are all 64bit, only
perform the narrowing on 32bit counters.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kvm/pmu-emul.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)
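
For readers following along, the behaviour change hinges on a per-counter
"is this 64bit wide?" decision. The standalone sketch below models that
decision; it is illustration only, not kernel code. lower_32_bits() mirrors
the kernel macro of the same name, while counter_is_64bit() and its inputs
are hypothetical stand-ins for the PMCR_EL0.LC bit and PMUv3p5 support:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static inline uint64_t lower_32_bits(uint64_t v)
{
	return (uint32_t)v;	/* mirrors the kernel macro */
}

/*
 * Hypothetical model of the decision: before PMUv3p5, only the cycle
 * counter (when PMCR_EL0.LC is set) is 64bit wide; with PMUv3p5 every
 * counter is.
 */
static bool counter_is_64bit(bool cycle_counter, bool pmcr_lc, bool pmuv3p5)
{
	return pmuv3p5 || (cycle_counter && pmcr_lc);
}

int main(void)
{
	uint64_t counter = 0x1ffffffffULL;

	/* A 32bit event counter is narrowed when it is stored back... */
	if (!counter_is_64bit(false, false, false))
		counter = lower_32_bits(counter);
	printf("32bit counter: %#llx\n", (unsigned long long)counter); /* 0xffffffff */

	/* ...whereas a 64bit counter keeps its full value. */
	counter = 0x1ffffffffULL;
	if (!counter_is_64bit(false, false, true))
		counter = lower_32_bits(counter);
	printf("64bit counter: %#llx\n", (unsigned long long)counter); /* 0x1ffffffff */
	return 0;
}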

Comments

Reiji Watanabe Aug. 24, 2022, 4:07 a.m. UTC | #1
Hi Marc,

On Fri, Aug 5, 2022 at 6:58 AM Marc Zyngier <maz@kernel.org> wrote:
>
> The current PMU emulation sometimes narrows counters to 32bit
> if the counter isn't the cycle counter. As this is going to
> change with PMUv3p5 where the counters are all 64bit, only
> perform the narrowing on 32bit counters.
>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  arch/arm64/kvm/pmu-emul.c | 17 +++++++++--------
>  1 file changed, 9 insertions(+), 8 deletions(-)
>
> diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
> index 9040d3c80096..0ab6f59f433c 100644
> --- a/arch/arm64/kvm/pmu-emul.c
> +++ b/arch/arm64/kvm/pmu-emul.c
> @@ -149,22 +149,22 @@ static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
>   */
>  static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
>  {
> -       u64 counter, reg, val;
> +       u64 counter, reg;
>
>         if (!pmc->perf_event)
>                 return;
>
>         counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
>
> -       if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
> +       if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
>                 reg = PMCCNTR_EL0;
> -               val = counter;
> -       } else {
> +       else
>                 reg = PMEVCNTR0_EL0 + pmc->idx;
> -               val = lower_32_bits(counter);
> -       }
>
> -       __vcpu_sys_reg(vcpu, reg) = val;
> +       if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
> +               counter = lower_32_bits(counter);

It appears that narrowing the counter to 32bit here is unnecessary
because it is already done by kvm_pmu_get_counter_value().

Thank you,
Reiji

> +
> +       __vcpu_sys_reg(vcpu, reg) = counter;
>
>         kvm_pmu_release_perf_event(pmc);
>  }
> @@ -417,7 +417,8 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
>
>                 /* Increment this counter */
>                 reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
> -               reg = lower_32_bits(reg);
> +               if (!kvm_pmu_idx_is_64bit(vcpu, i))
> +                       reg = lower_32_bits(reg);
>                 __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
>
>                 if (reg) /* No overflow? move on */
> --
> 2.34.1
>
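
To see why the extra narrowing in kvm_pmu_stop_counter() is redundant,
consider a simplified model of the flow Reiji describes:
kvm_pmu_get_counter_value() already truncates non-64bit counters before
returning. The sketch below is illustration only; get_counter_value() is a
hypothetical stand-in, and the real function also folds in the perf event
count, elided here:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static inline uint64_t lower_32_bits(uint64_t v)
{
	return (uint32_t)v;	/* mirrors the kernel macro */
}

/*
 * Simplified stand-in for kvm_pmu_get_counter_value(): the register
 * value (plus the perf event count, elided) is narrowed before it is
 * returned to the caller.
 */
static uint64_t get_counter_value(uint64_t raw, bool is_64bit)
{
	uint64_t counter = raw;

	if (!is_64bit)
		counter = lower_32_bits(counter);
	return counter;
}

int main(void)
{
	uint64_t counter = get_counter_value(0x1234ffffffffULL, false);

	/* Narrowing again, as kvm_pmu_stop_counter() does, is a no-op. */
	assert(counter == lower_32_bits(counter));
	return 0;
}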

Patch

diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 9040d3c80096..0ab6f59f433c 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -149,22 +149,22 @@ static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
  */
 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 {
-	u64 counter, reg, val;
+	u64 counter, reg;
 
 	if (!pmc->perf_event)
 		return;
 
 	counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
 
-	if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
+	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
 		reg = PMCCNTR_EL0;
-		val = counter;
-	} else {
+	else
 		reg = PMEVCNTR0_EL0 + pmc->idx;
-		val = lower_32_bits(counter);
-	}
 
-	__vcpu_sys_reg(vcpu, reg) = val;
+	if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
+		counter = lower_32_bits(counter);
+
+	__vcpu_sys_reg(vcpu, reg) = counter;
 
 	kvm_pmu_release_perf_event(pmc);
 }
@@ -417,7 +417,8 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
 
 		/* Increment this counter */
 		reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
-		reg = lower_32_bits(reg);
+		if (!kvm_pmu_idx_is_64bit(vcpu, i))
+			reg = lower_32_bits(reg);
 		__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
 
 		if (reg) /* No overflow? move on */
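
The second hunk also preserves the overflow convention: after the (now
conditional) truncation, a register value of zero means the counter
wrapped. A small illustrative model of that convention follows; it is
plain C, not kernel code, and increment_overflows() is a hypothetical
name:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static inline uint64_t lower_32_bits(uint64_t v)
{
	return (uint32_t)v;	/* mirrors the kernel macro */
}

/*
 * Models the increment path: bump the counter, wrap it only when it is
 * not 64bit wide, and report overflow exactly when the result is zero,
 * matching the "No overflow? move on" test in the hunk above.
 */
static bool increment_overflows(uint64_t *reg, bool is_64bit)
{
	*reg += 1;
	if (!is_64bit)
		*reg = lower_32_bits(*reg);
	return *reg == 0;
}

int main(void)
{
	uint64_t reg = 0xffffffffULL;

	printf("32bit: overflow=%d\n", increment_overflows(&reg, false)); /* 1 */
	reg = 0xffffffffULL;
	printf("64bit: overflow=%d\n", increment_overflows(&reg, true));  /* 0 */
	return 0;
}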