| Message ID | 20230126204444.2204061-8-zalbassam@google.com (mailing list archive) |
|---|---|
| State | New, archived |
| Headers | show |
| Series | perf: arm: Make PMUv3 driver available for aarch32 (expand) |
On Thu, 26 Jan 2023 20:44:43 +0000, Zaid Al-Bassam <zalbassam@google.com> wrote: > > From: Marc Zyngier <marc.zyngier@arm.com> > > The only thing stopping the PMUv3 driver from compiling on 32bit > is the lack of defined system registers names. This is easily > solved by providing the sysreg accessors and updating the Kconfig entry. > > Signed-off-by: Marc Zyngier <marc.zyngier@arm.com> > Co-developed-by: Zaid Al-Bassam <zalbassam@google.com> > Signed-off-by: Zaid Al-Bassam <zalbassam@google.com> > --- > arch/arm/include/asm/arm_pmuv3.h | 238 +++++++++++++++++++++++++++++++ > drivers/perf/Kconfig | 5 +- > 2 files changed, 240 insertions(+), 3 deletions(-) > create mode 100644 arch/arm/include/asm/arm_pmuv3.h > > diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h > new file mode 100644 > index 000000000000..816873c74eda > --- /dev/null > +++ b/arch/arm/include/asm/arm_pmuv3.h > @@ -0,0 +1,238 @@ > +/* SPDX-License-Identifier: GPL-2.0 */ > +/* > + * Copyright (C) 2012 ARM Ltd. > + * > + * This program is free software; you can redistribute it and/or modify > + * it under the terms of the GNU General Public License version 2 as > + * published by the Free Software Foundation. > + * > + * This program is distributed in the hope that it will be useful, > + * but WITHOUT ANY WARRANTY; without even the implied warranty of > + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the > + * GNU General Public License for more details. > + * > + * You should have received a copy of the GNU General Public License > + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
> + */ > + > +#ifndef __ASM_PMUV3_H > +#define __ASM_PMUV3_H > + > +#include <asm/cp15.h> > +#include <asm/cputype.h> > + > +#define PMCCNTR __ACCESS_CP15_64(0, c9) > + > +#define PMCR __ACCESS_CP15(c9, 0, c12, 0) > +#define PMCNTENSET __ACCESS_CP15(c9, 0, c12, 1) > +#define PMCNTENCLR __ACCESS_CP15(c9, 0, c12, 2) > +#define PMOVSR __ACCESS_CP15(c9, 0, c12, 3) > +#define PMSELR __ACCESS_CP15(c9, 0, c12, 5) > +#define PMCEID0 __ACCESS_CP15(c9, 0, c12, 6) > +#define PMCEID1 __ACCESS_CP15(c9, 0, c12, 7) > +#define PMXEVTYPER __ACCESS_CP15(c9, 0, c13, 1) > +#define PMXEVCNTR __ACCESS_CP15(c9, 0, c13, 2) > +#define PMUSERENR __ACCESS_CP15(c9, 0, c14, 0) > +#define PMINTENSET __ACCESS_CP15(c9, 0, c14, 1) > +#define PMINTENCLR __ACCESS_CP15(c9, 0, c14, 2) > +#define PMMIR __ACCESS_CP15(c9, 0, c14, 6) > +#define PMCCFILTR __ACCESS_CP15(c14, 0, c15, 7) > +#define PMEVCNTR0(n) __ACCESS_CP15(c14, 0, c8, n) > +#define PMEVCNTR1(n) __ACCESS_CP15(c14, 0, c9, n) > +#define PMEVCNTR2(n) __ACCESS_CP15(c14, 0, c10, n) > +#define PMEVCNTR3(n) __ACCESS_CP15(c14, 0, c11, n) > +#define PMEVTYPER0(n) __ACCESS_CP15(c14, 0, c12, n) > +#define PMEVTYPER1(n) __ACCESS_CP15(c14, 0, c13, n) > +#define PMEVTYPER2(n) __ACCESS_CP15(c14, 0, c14, n) > +#define PMEVTYPER3(n) __ACCESS_CP15(c14, 0, c15, n) > + > +#define PMEV_EVENTS_PER_REG 8 > +#define PMEV_REGISTER(n) (n / PMEV_EVENTS_PER_REG) > +#define PMEV_EVENT(n) (n % PMEV_EVENTS_PER_REG) > + > +#define PMEV_CASE(reg, ev, case_macro) \ > + case ev: \ > + case_macro(reg, ev); \ > + break > + > +#define PMEV_EV_SWITCH(reg, ev, case_macro) \ > + do { \ > + switch (ev) { \ > + PMEV_CASE(reg, 0, case_macro); \ > + PMEV_CASE(reg, 1, case_macro); \ > + PMEV_CASE(reg, 2, case_macro); \ > + PMEV_CASE(reg, 3, case_macro); \ > + PMEV_CASE(reg, 4, case_macro); \ > + PMEV_CASE(reg, 5, case_macro); \ > + PMEV_CASE(reg, 6, case_macro); \ > + PMEV_CASE(reg, 7, case_macro); \ > + default: \ > + WARN(1, "Invalid PMEV* event index\n"); \ > + } \ > + } while (0) 
Please fix your editor, the indentation of the "\" is totally wrong. > + > +#define PMEV_REG_SWITCH(reg, ev, case_macro) \ > + do { \ > + switch (reg) { \ > + case 0: \ > + PMEV_EV_SWITCH(0, ev, case_macro); \ > + break; \ > + case 1: \ > + PMEV_EV_SWITCH(1, ev, case_macro); \ > + break; \ > + case 2: \ > + PMEV_EV_SWITCH(2, ev, case_macro); \ > + break; \ > + case 3: \ > + PMEV_EV_SWITCH(3, ev, case_macro); \ > + break; \ > + default: \ > + WARN(1, "Invalid PMEV* register index\n"); \ > + } \ > + } while (0) No, please don't do that. This makes the whole thing unmaintainable to macro abuse. It also makes the code generation absolutely horrible, due to the switch nesting. Just look at the disassembly. The right way to do that is to declare the registers, one after the other, all 60 of them, and then use the arm64 big switch *unchanged*. You could even share it between the two architectures. The codegen is slightly bad (one big switch), and it is now trivial to read. See below for the actual change. M. 
diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h index 816873c74eda..4d483e055519 100644 --- a/arch/arm/include/asm/arm_pmuv3.h +++ b/arch/arm/include/asm/arm_pmuv3.h @@ -37,89 +37,132 @@ #define PMINTENCLR __ACCESS_CP15(c9, 0, c14, 2) #define PMMIR __ACCESS_CP15(c9, 0, c14, 6) #define PMCCFILTR __ACCESS_CP15(c14, 0, c15, 7) -#define PMEVCNTR0(n) __ACCESS_CP15(c14, 0, c8, n) -#define PMEVCNTR1(n) __ACCESS_CP15(c14, 0, c9, n) -#define PMEVCNTR2(n) __ACCESS_CP15(c14, 0, c10, n) -#define PMEVCNTR3(n) __ACCESS_CP15(c14, 0, c11, n) -#define PMEVTYPER0(n) __ACCESS_CP15(c14, 0, c12, n) -#define PMEVTYPER1(n) __ACCESS_CP15(c14, 0, c13, n) -#define PMEVTYPER2(n) __ACCESS_CP15(c14, 0, c14, n) -#define PMEVTYPER3(n) __ACCESS_CP15(c14, 0, c15, n) - -#define PMEV_EVENTS_PER_REG 8 -#define PMEV_REGISTER(n) (n / PMEV_EVENTS_PER_REG) -#define PMEV_EVENT(n) (n % PMEV_EVENTS_PER_REG) - -#define PMEV_CASE(reg, ev, case_macro) \ - case ev: \ - case_macro(reg, ev); \ - break - -#define PMEV_EV_SWITCH(reg, ev, case_macro) \ - do { \ - switch (ev) { \ - PMEV_CASE(reg, 0, case_macro); \ - PMEV_CASE(reg, 1, case_macro); \ - PMEV_CASE(reg, 2, case_macro); \ - PMEV_CASE(reg, 3, case_macro); \ - PMEV_CASE(reg, 4, case_macro); \ - PMEV_CASE(reg, 5, case_macro); \ - PMEV_CASE(reg, 6, case_macro); \ - PMEV_CASE(reg, 7, case_macro); \ - default: \ - WARN(1, "Invalid PMEV* event index\n"); \ - } \ - } while (0) -#define PMEV_REG_SWITCH(reg, ev, case_macro) \ - do { \ - switch (reg) { \ - case 0: \ - PMEV_EV_SWITCH(0, ev, case_macro); \ - break; \ - case 1: \ - PMEV_EV_SWITCH(1, ev, case_macro); \ - break; \ - case 2: \ - PMEV_EV_SWITCH(2, ev, case_macro); \ - break; \ - case 3: \ - PMEV_EV_SWITCH(3, ev, case_macro); \ - break; \ - default: \ - WARN(1, "Invalid PMEV* register index\n"); \ - } \ +#define PMEVCNTR0 __ACCESS_CP15(c14, 0, c8, 0) +#define PMEVCNTR1 __ACCESS_CP15(c14, 0, c8, 1) +#define PMEVCNTR2 __ACCESS_CP15(c14, 0, c8, 2) +#define PMEVCNTR3 
__ACCESS_CP15(c14, 0, c8, 3) +#define PMEVCNTR4 __ACCESS_CP15(c14, 0, c8, 4) +#define PMEVCNTR5 __ACCESS_CP15(c14, 0, c8, 5) +#define PMEVCNTR6 __ACCESS_CP15(c14, 0, c8, 6) +#define PMEVCNTR7 __ACCESS_CP15(c14, 0, c8, 7) +#define PMEVCNTR8 __ACCESS_CP15(c14, 0, c9, 0) +#define PMEVCNTR9 __ACCESS_CP15(c14, 0, c9, 1) +#define PMEVCNTR10 __ACCESS_CP15(c14, 0, c9, 2) +#define PMEVCNTR11 __ACCESS_CP15(c14, 0, c9, 3) +#define PMEVCNTR12 __ACCESS_CP15(c14, 0, c9, 4) +#define PMEVCNTR13 __ACCESS_CP15(c14, 0, c9, 5) +#define PMEVCNTR14 __ACCESS_CP15(c14, 0, c9, 6) +#define PMEVCNTR15 __ACCESS_CP15(c14, 0, c9, 7) +#define PMEVCNTR16 __ACCESS_CP15(c14, 0, c10, 0) +#define PMEVCNTR17 __ACCESS_CP15(c14, 0, c10, 1) +#define PMEVCNTR18 __ACCESS_CP15(c14, 0, c10, 2) +#define PMEVCNTR19 __ACCESS_CP15(c14, 0, c10, 3) +#define PMEVCNTR20 __ACCESS_CP15(c14, 0, c10, 4) +#define PMEVCNTR21 __ACCESS_CP15(c14, 0, c10, 5) +#define PMEVCNTR22 __ACCESS_CP15(c14, 0, c10, 6) +#define PMEVCNTR23 __ACCESS_CP15(c14, 0, c10, 7) +#define PMEVCNTR24 __ACCESS_CP15(c14, 0, c11, 0) +#define PMEVCNTR25 __ACCESS_CP15(c14, 0, c11, 1) +#define PMEVCNTR26 __ACCESS_CP15(c14, 0, c11, 2) +#define PMEVCNTR27 __ACCESS_CP15(c14, 0, c11, 3) +#define PMEVCNTR28 __ACCESS_CP15(c14, 0, c11, 4) +#define PMEVCNTR29 __ACCESS_CP15(c14, 0, c11, 5) +#define PMEVCNTR30 __ACCESS_CP15(c14, 0, c11, 6) + +#define PMEVTYPER0 __ACCESS_CP15(c14, 0, c12, 0) +#define PMEVTYPER1 __ACCESS_CP15(c14, 0, c12, 1) +#define PMEVTYPER2 __ACCESS_CP15(c14, 0, c12, 2) +#define PMEVTYPER3 __ACCESS_CP15(c14, 0, c12, 3) +#define PMEVTYPER4 __ACCESS_CP15(c14, 0, c12, 4) +#define PMEVTYPER5 __ACCESS_CP15(c14, 0, c12, 5) +#define PMEVTYPER6 __ACCESS_CP15(c14, 0, c12, 6) +#define PMEVTYPER7 __ACCESS_CP15(c14, 0, c12, 7) +#define PMEVTYPER8 __ACCESS_CP15(c14, 0, c13, 0) +#define PMEVTYPER9 __ACCESS_CP15(c14, 0, c13, 1) +#define PMEVTYPER10 __ACCESS_CP15(c14, 0, c13, 2) +#define PMEVTYPER11 __ACCESS_CP15(c14, 0, c13, 3) +#define PMEVTYPER12 
__ACCESS_CP15(c14, 0, c13, 4) +#define PMEVTYPER13 __ACCESS_CP15(c14, 0, c13, 5) +#define PMEVTYPER14 __ACCESS_CP15(c14, 0, c13, 6) +#define PMEVTYPER15 __ACCESS_CP15(c14, 0, c13, 7) +#define PMEVTYPER16 __ACCESS_CP15(c14, 0, c14, 0) +#define PMEVTYPER17 __ACCESS_CP15(c14, 0, c14, 1) +#define PMEVTYPER18 __ACCESS_CP15(c14, 0, c14, 2) +#define PMEVTYPER19 __ACCESS_CP15(c14, 0, c14, 3) +#define PMEVTYPER20 __ACCESS_CP15(c14, 0, c14, 4) +#define PMEVTYPER21 __ACCESS_CP15(c14, 0, c14, 5) +#define PMEVTYPER22 __ACCESS_CP15(c14, 0, c14, 6) +#define PMEVTYPER23 __ACCESS_CP15(c14, 0, c14, 7) +#define PMEVTYPER24 __ACCESS_CP15(c14, 0, c15, 0) +#define PMEVTYPER25 __ACCESS_CP15(c14, 0, c15, 1) +#define PMEVTYPER26 __ACCESS_CP15(c14, 0, c15, 2) +#define PMEVTYPER27 __ACCESS_CP15(c14, 0, c15, 3) +#define PMEVTYPER28 __ACCESS_CP15(c14, 0, c15, 4) +#define PMEVTYPER29 __ACCESS_CP15(c14, 0, c15, 5) +#define PMEVTYPER30 __ACCESS_CP15(c14, 0, c15, 6) + +#define PMEVN_CASE(n, case_macro) \ + case n: case_macro(n); break + +#define PMEVN_SWITCH(x, case_macro) \ + do { \ + switch (x) { \ + PMEVN_CASE(0, case_macro); \ + PMEVN_CASE(1, case_macro); \ + PMEVN_CASE(2, case_macro); \ + PMEVN_CASE(3, case_macro); \ + PMEVN_CASE(4, case_macro); \ + PMEVN_CASE(5, case_macro); \ + PMEVN_CASE(6, case_macro); \ + PMEVN_CASE(7, case_macro); \ + PMEVN_CASE(8, case_macro); \ + PMEVN_CASE(9, case_macro); \ + PMEVN_CASE(10, case_macro); \ + PMEVN_CASE(11, case_macro); \ + PMEVN_CASE(12, case_macro); \ + PMEVN_CASE(13, case_macro); \ + PMEVN_CASE(14, case_macro); \ + PMEVN_CASE(15, case_macro); \ + PMEVN_CASE(16, case_macro); \ + PMEVN_CASE(17, case_macro); \ + PMEVN_CASE(18, case_macro); \ + PMEVN_CASE(19, case_macro); \ + PMEVN_CASE(20, case_macro); \ + PMEVN_CASE(21, case_macro); \ + PMEVN_CASE(22, case_macro); \ + PMEVN_CASE(23, case_macro); \ + PMEVN_CASE(24, case_macro); \ + PMEVN_CASE(25, case_macro); \ + PMEVN_CASE(26, case_macro); \ + PMEVN_CASE(27, case_macro); \ + PMEVN_CASE(28, 
case_macro); \ + PMEVN_CASE(29, case_macro); \ + PMEVN_CASE(30, case_macro); \ + default: WARN(1, "Invalid PMEV* index\n"); \ + } \ } while (0) -#define RETURN_READ_PMEVCNTR(reg, ev) \ - return read_sysreg(PMEVCNTR##reg(ev)) +#define RETURN_READ_PMEVCNTRN(n) \ + return read_sysreg(PMEVCNTR##n) static unsigned long read_pmevcntrn(int n) { - const int reg = PMEV_REGISTER(n); - const int event = PMEV_EVENT(n); - - PMEV_REG_SWITCH(reg, event, RETURN_READ_PMEVCNTR); + PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN); return 0; } -#define WRITE_PMEVCNTR(reg, ev) \ - write_sysreg(val, PMEVCNTR##reg(ev)) +#define WRITE_PMEVCNTRN(n) \ + write_sysreg(val, PMEVCNTR##n) static void write_pmevcntrn(int n, unsigned long val) { - const int reg = PMEV_REGISTER(n); - const int event = PMEV_EVENT(n); - - PMEV_REG_SWITCH(reg, event, WRITE_PMEVCNTR); + PMEVN_SWITCH(n, WRITE_PMEVCNTRN); } -#define WRITE_PMEVTYPER(reg, ev) \ - write_sysreg(val, PMEVTYPER##reg(ev)) +#define WRITE_PMEVTYPERN(n) \ + write_sysreg(val, PMEVTYPER##n) static void write_pmevtypern(int n, unsigned long val) { - const int reg = PMEV_REGISTER(n); - const int event = PMEV_EVENT(n); - - PMEV_REG_SWITCH(reg, event, WRITE_PMEVTYPER); + PMEVN_SWITCH(n, WRITE_PMEVTYPERN); } static inline unsigned long read_pmmir(void)
diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h new file mode 100644 index 000000000000..816873c74eda --- /dev/null +++ b/arch/arm/include/asm/arm_pmuv3.h @@ -0,0 +1,238 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ASM_PMUV3_H +#define __ASM_PMUV3_H + +#include <asm/cp15.h> +#include <asm/cputype.h> + +#define PMCCNTR __ACCESS_CP15_64(0, c9) + +#define PMCR __ACCESS_CP15(c9, 0, c12, 0) +#define PMCNTENSET __ACCESS_CP15(c9, 0, c12, 1) +#define PMCNTENCLR __ACCESS_CP15(c9, 0, c12, 2) +#define PMOVSR __ACCESS_CP15(c9, 0, c12, 3) +#define PMSELR __ACCESS_CP15(c9, 0, c12, 5) +#define PMCEID0 __ACCESS_CP15(c9, 0, c12, 6) +#define PMCEID1 __ACCESS_CP15(c9, 0, c12, 7) +#define PMXEVTYPER __ACCESS_CP15(c9, 0, c13, 1) +#define PMXEVCNTR __ACCESS_CP15(c9, 0, c13, 2) +#define PMUSERENR __ACCESS_CP15(c9, 0, c14, 0) +#define PMINTENSET __ACCESS_CP15(c9, 0, c14, 1) +#define PMINTENCLR __ACCESS_CP15(c9, 0, c14, 2) +#define PMMIR __ACCESS_CP15(c9, 0, c14, 6) +#define PMCCFILTR __ACCESS_CP15(c14, 0, c15, 7) +#define PMEVCNTR0(n) __ACCESS_CP15(c14, 0, c8, n) +#define PMEVCNTR1(n) __ACCESS_CP15(c14, 0, c9, n) +#define PMEVCNTR2(n) __ACCESS_CP15(c14, 0, c10, n) +#define PMEVCNTR3(n) __ACCESS_CP15(c14, 0, c11, n) +#define PMEVTYPER0(n) __ACCESS_CP15(c14, 0, c12, n) +#define PMEVTYPER1(n) __ACCESS_CP15(c14, 0, c13, n) 
+#define PMEVTYPER2(n) __ACCESS_CP15(c14, 0, c14, n) +#define PMEVTYPER3(n) __ACCESS_CP15(c14, 0, c15, n) + +#define PMEV_EVENTS_PER_REG 8 +#define PMEV_REGISTER(n) (n / PMEV_EVENTS_PER_REG) +#define PMEV_EVENT(n) (n % PMEV_EVENTS_PER_REG) + +#define PMEV_CASE(reg, ev, case_macro) \ + case ev: \ + case_macro(reg, ev); \ + break + +#define PMEV_EV_SWITCH(reg, ev, case_macro) \ + do { \ + switch (ev) { \ + PMEV_CASE(reg, 0, case_macro); \ + PMEV_CASE(reg, 1, case_macro); \ + PMEV_CASE(reg, 2, case_macro); \ + PMEV_CASE(reg, 3, case_macro); \ + PMEV_CASE(reg, 4, case_macro); \ + PMEV_CASE(reg, 5, case_macro); \ + PMEV_CASE(reg, 6, case_macro); \ + PMEV_CASE(reg, 7, case_macro); \ + default: \ + WARN(1, "Invalid PMEV* event index\n"); \ + } \ + } while (0) + +#define PMEV_REG_SWITCH(reg, ev, case_macro) \ + do { \ + switch (reg) { \ + case 0: \ + PMEV_EV_SWITCH(0, ev, case_macro); \ + break; \ + case 1: \ + PMEV_EV_SWITCH(1, ev, case_macro); \ + break; \ + case 2: \ + PMEV_EV_SWITCH(2, ev, case_macro); \ + break; \ + case 3: \ + PMEV_EV_SWITCH(3, ev, case_macro); \ + break; \ + default: \ + WARN(1, "Invalid PMEV* register index\n"); \ + } \ + } while (0) + +#define RETURN_READ_PMEVCNTR(reg, ev) \ + return read_sysreg(PMEVCNTR##reg(ev)) +static unsigned long read_pmevcntrn(int n) +{ + const int reg = PMEV_REGISTER(n); + const int event = PMEV_EVENT(n); + + PMEV_REG_SWITCH(reg, event, RETURN_READ_PMEVCNTR); + return 0; +} + +#define WRITE_PMEVCNTR(reg, ev) \ + write_sysreg(val, PMEVCNTR##reg(ev)) +static void write_pmevcntrn(int n, unsigned long val) +{ + const int reg = PMEV_REGISTER(n); + const int event = PMEV_EVENT(n); + + PMEV_REG_SWITCH(reg, event, WRITE_PMEVCNTR); +} + +#define WRITE_PMEVTYPER(reg, ev) \ + write_sysreg(val, PMEVTYPER##reg(ev)) +static void write_pmevtypern(int n, unsigned long val) +{ + const int reg = PMEV_REGISTER(n); + const int event = PMEV_EVENT(n); + + PMEV_REG_SWITCH(reg, event, WRITE_PMEVTYPER); +} + +static inline unsigned long 
read_pmmir(void) +{ + return read_sysreg(PMMIR); +} + +static inline u32 read_pmuver(void) +{ + /* PMUVers is not a signed field */ + u32 dfr0 = read_cpuid_ext(CPUID_EXT_DFR0); + + return (dfr0 >> 24) & 0xf; +} + +static inline void write_pmcr(u32 val) +{ + write_sysreg(val, PMCR); +} + +static inline u32 read_pmcr(void) +{ + return read_sysreg(PMCR); +} + +static inline void write_pmselr(u32 val) +{ + write_sysreg(val, PMSELR); +} + +static inline void write_pmccntr(u64 val) +{ + write_sysreg(val, PMCCNTR); +} + +static inline u64 read_pmccntr(void) +{ + return read_sysreg(PMCCNTR); +} + +static inline void write_pmxevcntr(u32 val) +{ + write_sysreg(val, PMXEVCNTR); +} + +static inline u32 read_pmxevcntr(void) +{ + return read_sysreg(PMXEVCNTR); +} + +static inline void write_pmxevtyper(u32 val) +{ + write_sysreg(val, PMXEVTYPER); +} + +static inline void write_pmcntenset(u32 val) +{ + write_sysreg(val, PMCNTENSET); +} + +static inline void write_pmcntenclr(u32 val) +{ + write_sysreg(val, PMCNTENCLR); +} + +static inline void write_pmintenset(u32 val) +{ + write_sysreg(val, PMINTENSET); +} + +static inline void write_pmintenclr(u32 val) +{ + write_sysreg(val, PMINTENCLR); +} + +static inline void write_pmccfiltr(u32 val) +{ + write_sysreg(val, PMCCFILTR); +} + +static inline void write_pmovsclr(u32 val) +{ + write_sysreg(val, PMOVSR); +} + +static inline u32 read_pmovsclr(void) +{ + return read_sysreg(PMOVSR); +} + +static inline void write_pmuserenr(u32 val) +{ + write_sysreg(val, PMUSERENR); +} + +static inline u32 read_pmceid0(void) +{ + return read_sysreg(PMCEID0); +} + +static inline u32 read_pmceid1(void) +{ + return read_sysreg(PMCEID1); +} + +static inline void +armv8pmu_kvm_set_events(u32 set, struct perf_event_attr *attr) {} + +static inline void armv8pmu_kvm_clr_events(u32 clr) {} + +static inline bool armv8pmu_kvm_counter_deferred(struct perf_event_attr *attr) +{ + return false; +} + +#endif diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig 
index defe6b47854b..711f82400086 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -101,15 +101,14 @@ config ARM_SMMU_V3_PMU based on the Stream ID of the corresponding master. config ARM_PMUV3 - depends on HW_PERF_EVENTS && ARM64 + depends on HW_PERF_EVENTS && ((ARM && CPU_V7) || ARM64) bool "ARM PMUv3 support" if !ARM64 - default y + default ARM64 help Say y if you want to use the ARM performance monitor unit (PMU) version 3. The PMUv3 is the CPU performance monitors on ARMv8 (aarch32 and aarch64) systems that implement the PMUv3 architecture. - Currently, PMUv3 is only supported on aarch64 (arm64) config ARM_DSU_PMU tristate "ARM DynamIQ Shared Unit (DSU) PMU"