Message ID | 20220805135813.2102034-9-maz@kernel.org
---|---
State | New, archived
Series | KVM: arm64: PMU: Fixing chained events, and PMUv3p5 support
Hi Marc,

On Fri, Aug 05, 2022 at 02:58:12PM +0100, Marc Zyngier wrote:
> PMUv3p5 (which is mandatory with ARMv8.5) comes with some extra
> features:
>
> - All counters are 64bit
>
> - The overflow point is controlled by the PMCR_EL0.LP bit
>
> Add the required checks in the helpers that control counter
> width and overflow, as well as the sysreg handling for the LP
> bit. A new kvm_pmu_is_3p5() helper makes it easy to spot the
> PMUv3p5 specific handling.
>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  arch/arm64/kvm/pmu-emul.c | 8 +++++---
>  arch/arm64/kvm/sys_regs.c | 4 ++++
>  include/kvm/arm_pmu.h     | 8 ++++++++
>  3 files changed, 17 insertions(+), 3 deletions(-)

[...]

> +/*
> + * Evaluates as true when emulating PMUv3p5, and false otherwise.
> + */
> +#define kvm_pmu_is_3p5(vcpu)						\
> +	(vcpu->kvm->arch.dfr0_pmuver >= ID_AA64DFR0_PMUVER_8_5 &&	\
> +	 vcpu->kvm->arch.dfr0_pmuver != ID_AA64DFR0_PMUVER_IMP_DEF)

I don't believe the IMP_DEF condition will ever evaluate to false, as
dfr0_pmuver is sanitized at initialization and on writes from userspace.
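[Editor's note: the second clause of the macro matters because the IMP_DEF
encoding compares greater-or-equal to the PMUv3p5 one. A minimal standalone
sketch, with local stand-in constants mirroring the kernel's
ID_AA64DFR0_EL1.PMUVer encodings of the time (0x6 for 8.5, 0xf for IMP_DEF),
not kernel includes:

#include <stdio.h>

/* Assumed stand-ins for the kernel's ID_AA64DFR0_EL1.PMUVer encodings. */
#define PMUVER_8_5	0x6
#define PMUVER_IMP_DEF	0xf

/* Same shape as kvm_pmu_is_3p5(), applied to a raw pmuver value. */
static int is_3p5(unsigned int pmuver)
{
	return pmuver >= PMUVER_8_5 && pmuver != PMUVER_IMP_DEF;
}

int main(void)
{
	/*
	 * IMP_DEF (0xf) satisfies >= 0x6, so only the second clause
	 * rejects it. If dfr0_pmuver can never hold IMP_DEF because it
	 * is sanitized beforehand, that clause is dead code, which is
	 * the point being made above.
	 */
	printf("8.5:     %d\n", is_3p5(PMUVER_8_5));	/* 1 */
	printf("IMP_DEF: %d\n", is_3p5(PMUVER_IMP_DEF));	/* 0 */
	return 0;
}
]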
On Wed, 10 Aug 2022 08:16:14 +0100,
Oliver Upton <oliver.upton@linux.dev> wrote:
>
> Hi Marc,
>
> On Fri, Aug 05, 2022 at 02:58:12PM +0100, Marc Zyngier wrote:

[...]

> > +/*
> > + * Evaluates as true when emulating PMUv3p5, and false otherwise.
> > + */
> > +#define kvm_pmu_is_3p5(vcpu)						\
> > +	(vcpu->kvm->arch.dfr0_pmuver >= ID_AA64DFR0_PMUVER_8_5 &&	\
> > +	 vcpu->kvm->arch.dfr0_pmuver != ID_AA64DFR0_PMUVER_IMP_DEF)
>
> I don't believe the IMP_DEF condition will ever evaluate to false, as
> dfr0_pmuver is sanitized at initialization and on writes from userspace.

Good point. That's a leftover from a previous version. I'll fix that.

Thanks,

	M.
On Wed, Aug 10, 2022 at 2:28 AM Marc Zyngier <maz@kernel.org> wrote:
>
> On Wed, 10 Aug 2022 08:16:14 +0100,
> Oliver Upton <oliver.upton@linux.dev> wrote:
> >
> > Hi Marc,
> >
> > On Fri, Aug 05, 2022 at 02:58:12PM +0100, Marc Zyngier wrote:

[...]

> > > @@ -654,6 +654,8 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
> > >  	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);

Not directly related to this series, but using 0xdecafbad above appears
odd. I think it leads to bits 3 and 5 being unconditionally set in the
reset value the guest initially sees, even on configurations where those
bits should be RES0.

[...]

> > > +/*
> > > + * Evaluates as true when emulating PMUv3p5, and false otherwise.
> > > + */
> > > +#define kvm_pmu_is_3p5(vcpu)						\
> > > +	(vcpu->kvm->arch.dfr0_pmuver >= ID_AA64DFR0_PMUVER_8_5 &&	\
> > > +	 vcpu->kvm->arch.dfr0_pmuver != ID_AA64DFR0_PMUVER_IMP_DEF)
> >
> > I don't believe the IMP_DEF condition will ever evaluate to false, as
> > dfr0_pmuver is sanitized at initialization and on writes from userspace.
>
> Good point. That's a leftover from a previous version. I'll fix that.

With the current series, I think dfr0_pmuver could still be IMP_DEF, due
to the same bug I mentioned for patch 6.
(https://lore.kernel.org/all/20220214065746.1230608-11-reijiw@google.com/)

Thank you,
Reiji
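[Editor's note: the bit arithmetic behind the 0xdecafbad observation can be
checked standalone. The PMCR bit positions and the 0xff value for
ARMV8_PMU_PMCR_MASK below are assumptions mirroring the kernel headers of
the time, not includes:

#include <stdio.h>

/* Assumed PMCR_EL0 bit positions, per the architecture. */
#define PMCR_E	(1U << 0)	/* Enable */
#define PMCR_C	(1U << 2)	/* Cycle counter reset (write-only, RAZ) */
#define PMCR_D	(1U << 3)	/* Clock divider */
#define PMCR_DP	(1U << 5)	/* Disable cycle counter when prohibited */
#define PMCR_LP	(1U << 7)	/* Long event counter overflow (PMUv3p5) */

#define PMCR_MASK 0xffU		/* assumed ARMV8_PMU_PMCR_MASK */

int main(void)
{
	/* The reset_pmcr() recipe, minus the IMPDEF PMCR_EL0.N part. */
	unsigned int val = (PMCR_MASK & 0xdecafbad) & ~PMCR_E;

	printf("low reset bits = 0x%02x\n", val);	/* 0xac */
	printf("D=%d DP=%d LP=%d\n",
	       !!(val & PMCR_D), !!(val & PMCR_DP),	/* both 1 */
	       !!(val & PMCR_LP));	/* 1, but cleared by the new
					 * !kvm_pmu_is_3p5() check */
	return 0;
}

D (bit 3) and DP (bit 5) come out set, matching the comment above; C
(bit 2) is write-only and reads as zero either way.]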
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 33a88ca7b7fd..b33a2953cbf6 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -50,13 +50,15 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
  */
 static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
 {
-	return (select_idx == ARMV8_PMU_CYCLE_IDX);
+	return (select_idx == ARMV8_PMU_CYCLE_IDX || kvm_pmu_is_3p5(vcpu));
 }
 
 static bool kvm_pmu_idx_has_64bit_overflow(struct kvm_vcpu *vcpu, u64 select_idx)
 {
-	return (select_idx == ARMV8_PMU_CYCLE_IDX &&
-		__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
+	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0);
+
+	return (select_idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) ||
+	       (select_idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC));
 }
 
 static bool kvm_pmu_counter_can_chain(struct kvm_vcpu *vcpu, u64 idx)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c0595f31dab8..2b5e0ec5c100 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -654,6 +654,8 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
 	if (!system_supports_32bit_el0())
 		val |= ARMV8_PMU_PMCR_LC;
+	if (!kvm_pmu_is_3p5(vcpu))
+		val &= ~ARMV8_PMU_PMCR_LP;
 	__vcpu_sys_reg(vcpu, r->reg) = val;
 }
 
@@ -703,6 +705,8 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		val |= p->regval & ARMV8_PMU_PMCR_MASK;
 	if (!system_supports_32bit_el0())
 		val |= ARMV8_PMU_PMCR_LC;
+	if (!kvm_pmu_is_3p5(vcpu))
+		val &= ~ARMV8_PMU_PMCR_LP;
 	__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
 	kvm_pmu_handle_pmcr(vcpu, val);
 	kvm_vcpu_pmu_restore_guest(vcpu);
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 6bda9b071084..846502251923 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -89,6 +89,13 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
 		vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
 	} while (0)
 
+/*
+ * Evaluates as true when emulating PMUv3p5, and false otherwise.
+ */
+#define kvm_pmu_is_3p5(vcpu)						\
+	(vcpu->kvm->arch.dfr0_pmuver >= ID_AA64DFR0_PMUVER_8_5 &&	\
+	 vcpu->kvm->arch.dfr0_pmuver != ID_AA64DFR0_PMUVER_IMP_DEF)
+
 u8 kvm_arm_pmu_get_host_pmuver(void);
 
 #else
@@ -153,6 +160,7 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 }
 
 #define kvm_vcpu_has_pmu(vcpu)	({ false; })
+#define kvm_pmu_is_3p5(vcpu)	({ false; })
 static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu)	{}
 static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)	{}
 static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)	{}
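[Editor's note: the split between the two pmu-emul.c helpers — counter
width vs. overflow point — is the crux of the patch. A toy model (not KVM
code) of how a 64bit-wide PMUv3p5 counter can still deliver a 32bit
overflow when PMCR_EL0.LP is clear:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Width of the counter itself: with PMUv3p5 all counters are 64bit. */
static uint64_t counter_mask(bool counter_is_64bit)
{
	return counter_is_64bit ? ~0ULL : (uint64_t)UINT32_MAX;
}

/* Overflow point: bit 31 unless LP (or LC for the cycle counter) is set. */
static bool overflows(uint64_t before, uint64_t after, bool overflow_64bit)
{
	uint64_t mask = overflow_64bit ? ~0ULL : (uint64_t)UINT32_MAX;

	/* A single increment overflowed if the value wrapped at the mask. */
	return (after & mask) < (before & mask);
}

int main(void)
{
	uint64_t before = 0xffffffffULL;	/* low 32 bits all ones */
	uint64_t after = before + 1;

	/* LP clear: the counter keeps counting into its upper half... */
	printf("value after: 0x%llx\n",
	       (unsigned long long)(after & counter_mask(true)));
	/* ...but an overflow still fires at bit 31. */
	printf("LP=0 overflow: %d\n", overflows(before, after, false)); /* 1 */
	printf("LP=1 overflow: %d\n", overflows(before, after, true));  /* 0 */
	return 0;
}
]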
PMUv3p5 (which is mandatory with ARMv8.5) comes with some extra
features:

- All counters are 64bit

- The overflow point is controlled by the PMCR_EL0.LP bit

Add the required checks in the helpers that control counter
width and overflow, as well as the sysreg handling for the LP
bit. A new kvm_pmu_is_3p5() helper makes it easy to spot the
PMUv3p5 specific handling.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/kvm/pmu-emul.c | 8 +++++---
 arch/arm64/kvm/sys_regs.c | 4 ++++
 include/kvm/arm_pmu.h     | 8 ++++++++
 3 files changed, 17 insertions(+), 3 deletions(-)