Message ID | 20231123063750.2176250-3-shahuang@redhat.com (mailing list archive) |
---|---|
State | New, archived |
Series | KVM: selftests: aarch64: Introduce pmu_event_filter_test |
Hi Shaoqin,

On 11/23/23 07:37, Shaoqin Huang wrote:
> Move those pmu helper function into lib/, thus it can be used by other

Not really into lib but rather in vpmu.h header.

> pmu test.

no functional change intended

> Signed-off-by: Shaoqin Huang <shahuang@redhat.com>
> ---
>  .../kvm/aarch64/vpmu_counter_access.c | 118 -----------------
>  .../selftests/kvm/include/aarch64/vpmu.h | 119 ++++++++++++++++++
>  2 files changed, 119 insertions(+), 118 deletions(-)
[...]

Reviewed-by: Eric Auger <eric.auger@redhat.com>

Thanks

Eric
Hi Shaoqin,

On Wed, Nov 22, 2023 at 10:39 PM Shaoqin Huang <shahuang@redhat.com> wrote:
>
> Move those pmu helper function into lib/, thus it can be used by other
> pmu test.
>
> Signed-off-by: Shaoqin Huang <shahuang@redhat.com>
[...]
> +static inline void disable_counter(int idx)
> +{
> +        uint64_t v = read_sysreg(pmcntenset_el0);
> +
> +        write_sysreg(BIT(idx) | v, pmcntenclr_el0);
> +        isb();
> +}
> +

As mentioned in [1], the current implementation of disable_counter()
is buggy and would end up disabling all the counters.
However if you intend to keep it (even though it would remain unused),
may be change the definition something to:

static inline void disable_counter(int idx)
{
        write_sysreg(BIT(idx), pmcntenclr_el0);
        isb();
}

Thank you.
Raghavendra

[1]: https://lore.kernel.org/all/20231122221526.2750966-1-rananta@google.com/
On 2023-11-27 21:48, Raghavendra Rao Ananta wrote:
> Hi Shaoqin,
>
> On Wed, Nov 22, 2023 at 10:39 PM Shaoqin Huang <shahuang@redhat.com>
> wrote:
>>
>> Move those pmu helper function into lib/, thus it can be used by other
>> pmu test.
[...]
>> +static inline void disable_counter(int idx)
>> +{
>> +        uint64_t v = read_sysreg(pmcntenset_el0);
>> +
>> +        write_sysreg(BIT(idx) | v, pmcntenclr_el0);
>> +        isb();
>> +}
>> +
> As mentioned in [1], the current implementation of disable_counter()
> is buggy and would end up disabling all the counters.
> However if you intend to keep it (even though it would remain unused),
> may be change the definition something to:
>
> static inline void disable_counter(int idx)
> {
>         write_sysreg(BIT(idx), pmcntenclr_el0);
>         isb();
> }

Same thing for the enable_counter() function, by the way.
It doesn't have the same disastrous effect, but it is
buggy (imagine an interrupt disabling a counter between
the read and the write...).

In general, the set/clr registers should always be used
in their write form, never in a RMW form.

Thanks,

        M.
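Putting the two comments above together, write-only versions of both helpers would look roughly like the sketch below. This is only an illustration of the suggested fix, not code taken from the posted series:

static inline void enable_counter(int idx)
{
        /* PMCNTENSET_EL0 is write-one-to-set; zero bits are ignored,
         * so no read-modify-write is needed. */
        write_sysreg(BIT(idx), pmcntenset_el0);
        isb();
}

static inline void disable_counter(int idx)
{
        /* PMCNTENCLR_EL0 is write-one-to-clear; counters whose bits
         * are not written keep running. */
        write_sysreg(BIT(idx), pmcntenclr_el0);
        isb();
}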
Hi Raghavendra,

On 11/28/23 05:48, Raghavendra Rao Ananta wrote:
> Hi Shaoqin,
>
> On Wed, Nov 22, 2023 at 10:39 PM Shaoqin Huang <shahuang@redhat.com> wrote:
>>
>> Move those pmu helper function into lib/, thus it can be used by other
>> pmu test.
[...]
>> +static inline void disable_counter(int idx)
>> +{
>> +        uint64_t v = read_sysreg(pmcntenset_el0);
>> +
>> +        write_sysreg(BIT(idx) | v, pmcntenclr_el0);
>> +        isb();
>> +}
>> +
> As mentioned in [1], the current implementation of disable_counter()
> is buggy and would end up disabling all the counters.
> However if you intend to keep it (even though it would remain unused),
> may be change the definition something to:
>
> static inline void disable_counter(int idx)
> {
>         write_sysreg(BIT(idx), pmcntenclr_el0);
>         isb();
> }
>

Ok. I will integrate another patch into my series to fix this problem.

Thanks,
Shaoqin

> Thank you.
> Raghavendra
>
> [1]: https://lore.kernel.org/all/20231122221526.2750966-1-rananta@google.com/
Hi Marc,

On 11/28/23 16:43, Marc Zyngier wrote:
> On 2023-11-27 21:48, Raghavendra Rao Ananta wrote:
>> Hi Shaoqin,
>>
>> On Wed, Nov 22, 2023 at 10:39 PM Shaoqin Huang <shahuang@redhat.com>
>> wrote:
>>>
>>> Move those pmu helper function into lib/, thus it can be used by other
>>> pmu test.
[...]
>>> +static inline void disable_counter(int idx)
>>> +{
>>> +        uint64_t v = read_sysreg(pmcntenset_el0);
>>> +
>>> +        write_sysreg(BIT(idx) | v, pmcntenclr_el0);
>>> +        isb();
>>> +}
>>> +
>> As mentioned in [1], the current implementation of disable_counter()
>> is buggy and would end up disabling all the counters.
>> However if you intend to keep it (even though it would remain unused),
>> may be change the definition something to:
>>
>> static inline void disable_counter(int idx)
>> {
>>         write_sysreg(BIT(idx), pmcntenclr_el0);
>>         isb();
>> }
>
> Same thing for the enable_counter() function, by the way.
> It doesn't have the same disastrous effect, but it is
> buggy (imagine an interrupt disabling a counter between
> the read and the write...).
>
> In general, the set/clr registers should always be used
> in their write form, never in a RMW form.
>

Thanks for your explanation. I will fix the enable_counter together with
the disable_counter.

Thanks,
Shaoqin

> Thanks,
>
>         M.
diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
index 17305408a334..62d6315790ab 100644
--- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
+++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
@@ -20,12 +20,6 @@
 #include <perf/arm_pmuv3.h>
 #include <linux/bitfield.h>
 
-/* The max number of the PMU event counters (excluding the cycle counter) */
-#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
-
-/* The cycle counter bit position that's common among the PMU registers */
-#define ARMV8_PMU_CYCLE_IDX 31
-
 static struct vpmu_vm *vpmu_vm;
 
 struct pmreg_sets {
@@ -35,118 +29,6 @@ struct pmreg_sets {
 
 #define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}
 
-static uint64_t get_pmcr_n(uint64_t pmcr)
-{
-        return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
-}
-
-static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
-{
-        *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
-        *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
-}
-
-static uint64_t get_counters_mask(uint64_t n)
-{
-        uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
-
-        if (n)
-                mask |= GENMASK(n - 1, 0);
-        return mask;
-}
-
-/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
-static inline unsigned long read_sel_evcntr(int sel)
-{
-        write_sysreg(sel, pmselr_el0);
-        isb();
-        return read_sysreg(pmxevcntr_el0);
-}
-
-/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
-static inline void write_sel_evcntr(int sel, unsigned long val)
-{
-        write_sysreg(sel, pmselr_el0);
-        isb();
-        write_sysreg(val, pmxevcntr_el0);
-        isb();
-}
-
-/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
-static inline unsigned long read_sel_evtyper(int sel)
-{
-        write_sysreg(sel, pmselr_el0);
-        isb();
-        return read_sysreg(pmxevtyper_el0);
-}
-
-/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
-static inline void write_sel_evtyper(int sel, unsigned long val)
-{
-        write_sysreg(sel, pmselr_el0);
-        isb();
-        write_sysreg(val, pmxevtyper_el0);
-        isb();
-}
-
-static inline void enable_counter(int idx)
-{
-        uint64_t v = read_sysreg(pmcntenset_el0);
-
-        write_sysreg(BIT(idx) | v, pmcntenset_el0);
-        isb();
-}
-
-static inline void disable_counter(int idx)
-{
-        uint64_t v = read_sysreg(pmcntenset_el0);
-
-        write_sysreg(BIT(idx) | v, pmcntenclr_el0);
-        isb();
-}
-
-static void pmu_disable_reset(void)
-{
-        uint64_t pmcr = read_sysreg(pmcr_el0);
-
-        /* Reset all counters, disabling them */
-        pmcr &= ~ARMV8_PMU_PMCR_E;
-        write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
-        isb();
-}
-
-#define RETURN_READ_PMEVCNTRN(n) \
-        return read_sysreg(pmevcntr##n##_el0)
-static unsigned long read_pmevcntrn(int n)
-{
-        PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
-        return 0;
-}
-
-#define WRITE_PMEVCNTRN(n) \
-        write_sysreg(val, pmevcntr##n##_el0)
-static void write_pmevcntrn(int n, unsigned long val)
-{
-        PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
-        isb();
-}
-
-#define READ_PMEVTYPERN(n) \
-        return read_sysreg(pmevtyper##n##_el0)
-static unsigned long read_pmevtypern(int n)
-{
-        PMEVN_SWITCH(n, READ_PMEVTYPERN);
-        return 0;
-}
-
-#define WRITE_PMEVTYPERN(n) \
-        write_sysreg(val, pmevtyper##n##_el0)
-static void write_pmevtypern(int n, unsigned long val)
-{
-        PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
-        isb();
-}
-
 /*
  * The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}<n>_EL0
  * accessors that test cases will use. Each of the accessors will
diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
index 0a56183644ee..e0cc1ca1c4b7 100644
--- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h
+++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h
@@ -1,10 +1,17 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 
 #include <kvm_util.h>
+#include <perf/arm_pmuv3.h>
 
 #define GICD_BASE_GPA 0x8000000ULL
 #define GICR_BASE_GPA 0x80A0000ULL
 
+/* The max number of the PMU event counters (excluding the cycle counter) */
+#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
+
+/* The cycle counter bit position that's common among the PMU registers */
+#define ARMV8_PMU_CYCLE_IDX 31
+
 struct vpmu_vm {
         struct kvm_vm *vm;
         struct kvm_vcpu *vcpu;
@@ -14,3 +21,115 @@ struct vpmu_vm {
 struct vpmu_vm *create_vpmu_vm(void *guest_code);
 
 void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm);
+
+static inline uint64_t get_pmcr_n(uint64_t pmcr)
+{
+        return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
+}
+
+static inline void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
+{
+        *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
+        *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
+}
+
+static inline uint64_t get_counters_mask(uint64_t n)
+{
+        uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX);
+
+        if (n)
+                mask |= GENMASK(n - 1, 0);
+        return mask;
+}
+
+/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
+static inline unsigned long read_sel_evcntr(int sel)
+{
+        write_sysreg(sel, pmselr_el0);
+        isb();
+        return read_sysreg(pmxevcntr_el0);
+}
+
+/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */
+static inline void write_sel_evcntr(int sel, unsigned long val)
+{
+        write_sysreg(sel, pmselr_el0);
+        isb();
+        write_sysreg(val, pmxevcntr_el0);
+        isb();
+}
+
+/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
+static inline unsigned long read_sel_evtyper(int sel)
+{
+        write_sysreg(sel, pmselr_el0);
+        isb();
+        return read_sysreg(pmxevtyper_el0);
+}
+
+/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */
+static inline void write_sel_evtyper(int sel, unsigned long val)
+{
+        write_sysreg(sel, pmselr_el0);
+        isb();
+        write_sysreg(val, pmxevtyper_el0);
+        isb();
+}
+
+static inline void enable_counter(int idx)
+{
+        uint64_t v = read_sysreg(pmcntenset_el0);
+
+        write_sysreg(BIT(idx) | v, pmcntenset_el0);
+        isb();
+}
+
+static inline void disable_counter(int idx)
+{
+        uint64_t v = read_sysreg(pmcntenset_el0);
+
+        write_sysreg(BIT(idx) | v, pmcntenclr_el0);
+        isb();
+}
+
+static inline void pmu_disable_reset(void)
+{
+        uint64_t pmcr = read_sysreg(pmcr_el0);
+
+        /* Reset all counters, disabling them */
+        pmcr &= ~ARMV8_PMU_PMCR_E;
+        write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0);
+        isb();
+}
+
+#define RETURN_READ_PMEVCNTRN(n) \
+        return read_sysreg(pmevcntr##n##_el0)
+static inline unsigned long read_pmevcntrn(int n)
+{
+        PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
+        return 0;
+}
+
+#define WRITE_PMEVCNTRN(n) \
+        write_sysreg(val, pmevcntr##n##_el0)
+static inline void write_pmevcntrn(int n, unsigned long val)
+{
+        PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
+        isb();
+}
+
+#define READ_PMEVTYPERN(n) \
+        return read_sysreg(pmevtyper##n##_el0)
+static inline unsigned long read_pmevtypern(int n)
+{
+        PMEVN_SWITCH(n, READ_PMEVTYPERN);
+        return 0;
+}
+
+#define WRITE_PMEVTYPERN(n) \
+        write_sysreg(val, pmevtyper##n##_el0)
+static inline void write_pmevtypern(int n, unsigned long val)
+{
+        PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
+        isb();
+}
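For context, the sketch below shows how a guest running inside a selftest could drive these helpers once they are available from vpmu.h. It is illustrative only: the use of counter 0, the raw event number 0x11 (CPU_CYCLES), and the GUEST_ASSERT() check are assumptions for the example, not part of this patch.

static void guest_cycle_count_example(void)
{
        unsigned long val;

        pmu_disable_reset();                    /* stop and clear all counters */
        write_pmevtypern(0, 0x11);              /* program event counter 0 (example event) */
        write_pmevcntrn(0, 0);                  /* start counting from zero */
        enable_counter(0);                      /* PMCNTENSET_EL0[0] */
        write_sysreg(read_sysreg(pmcr_el0) | ARMV8_PMU_PMCR_E, pmcr_el0);
        isb();

        /* ... run the workload being measured ... */

        disable_counter(0);
        val = read_pmevcntrn(0);                /* read back the event count */
        GUEST_ASSERT(val > 0);
}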
Move those pmu helper functions into lib/, thus they can be used by other
pmu tests.

Signed-off-by: Shaoqin Huang <shahuang@redhat.com>
---
 .../kvm/aarch64/vpmu_counter_access.c | 118 -----------------
 .../selftests/kvm/include/aarch64/vpmu.h | 119 ++++++++++++++++++
 2 files changed, 119 insertions(+), 118 deletions(-)
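Since the helpers become static inlines in a shared header, a follow-up test (such as the pmu_event_filter_test named in the series title) would only need to include it. A hypothetical fragment; the include path and test body are assumed, not taken from the series:

#include "vpmu.h"   /* assumed include path for the shared header */

static void guest_code(void)
{
        uint64_t pmcr = read_sysreg(pmcr_el0);

        /* The shared helpers are usable without any extra linkage. */
        GUEST_ASSERT(get_pmcr_n(pmcr) <= ARMV8_PMU_MAX_GENERAL_COUNTERS);
}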