Message ID | 1542023835-21446-8-git-send-email-julien.thierry@arm.com (mailing list archive)
---|---
State | New, archived
Series | arm64: provide pseudo NMI with GICv3
On Mon, Nov 12, 2018 at 11:56:58AM +0000, Julien Thierry wrote:
> If ICC_PMR_EL1 is used to mask interrupts, its value should be
> saved/restored whenever a task is context switched out/in or
> gets an exception.
>
> Add PMR to the registers to save in the pt_regs struct upon kernel entry,
> and restore it before ERET. Also, initialize it to a sane value when
> creating new tasks.

Could you please elaborate on when this matters?

Does this actually matter for context-switch? Can we do that in a
pseudo-NMI handler?

Or does this only matter for exception entry/return, and not
context-switch?

Thanks,
Mark.

> Signed-off-by: Julien Thierry <julien.thierry@arm.com>
> Cc: Catalin Marinas <catalin.marinas@arm.com>
> Cc: Will Deacon <will.deacon@arm.com>
> Cc: Dave Martin <Dave.Martin@arm.com>
> [...]

(Remainder of the quoted patch trimmed; the full diff appears at the end of this thread.)
On 29/11/18 16:46, Mark Rutland wrote:
> On Mon, Nov 12, 2018 at 11:56:58AM +0000, Julien Thierry wrote:
>> If ICC_PMR_EL1 is used to mask interrupts, its value should be
>> saved/restored whenever a task is context switched out/in or
>> gets an exception.
>>
>> Add PMR to the registers to save in the pt_regs struct upon kernel entry,
>> and restore it before ERET. Also, initialize it to a sane value when
>> creating new tasks.
>
> Could you please elaborate on when this matters?
>
> Does this actually matter for context-switch? Can we do that in a
> pseudo-NMI handler?
>
> Or does this only matter for exception entry/return, and not
> context-switch?

Yes, PMR becomes an equivalent of PSR.I, and in the same way it needs to
be saved/restored on exception entry/return (except that, unlike PSR.I,
this is not done by the architecture for PMR).

It is also necessary when context switching to a task that was preempted
in exception context (el1_preempt, return from syscall, ...), in the same
way as spsr_el1.

I'll update the commit message.

Thanks,

Julien

>> [...]

(Remainder of the quoted patch trimmed; the full diff appears at the end of this thread.)
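To make the PSR.I analogy above concrete, here is a minimal C sketch of what
masking and unmasking through PMR look like. This is illustration only, not
code from this series: the pmr_irq_save()/pmr_irq_restore() helper names and
GIC_PRIO_IRQOFF are assumptions, while read_sysreg_s(), write_sysreg_s() and
dsb() are the generic arm64 kernel primitives (asm/sysreg.h, asm/barrier.h).

/*
 * Illustrative sketch only: with priority masking, "IRQs disabled" means
 * PMR is lowered below normal interrupt priority instead of setting PSR.I.
 * GIC_PRIO_IRQOFF and the helper names are assumed for illustration.
 */
static inline unsigned long pmr_irq_save(void)
{
        unsigned long flags = read_sysreg_s(SYS_ICC_PMR_EL1);

        write_sysreg_s(GIC_PRIO_IRQOFF, SYS_ICC_PMR_EL1);
        return flags;
}

static inline void pmr_irq_restore(unsigned long flags)
{
        write_sysreg_s(flags, SYS_ICC_PMR_EL1);
        dsb(sy);        /* make the unmask visible to the redistributor */
}

The point of the patch is that this piece of state, like SPSR_EL1, must also
travel with pt_regs (pmr_save) across exceptions and context switches.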
On Mon, Nov 12, 2018 at 11:56:58AM +0000, Julien Thierry wrote:
> diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
> index 039144e..eb8120e 100644
> --- a/arch/arm64/kernel/entry.S
> +++ b/arch/arm64/kernel/entry.S
> @@ -249,6 +249,12 @@ alternative_else_nop_endif
>          msr sp_el0, tsk
>          .endif
>
> +        /* Save pmr */
> +alternative_if ARM64_HAS_IRQ_PRIO_MASKING
> +        mrs_s x20, SYS_ICC_PMR_EL1
> +        str x20, [sp, #S_PMR_SAVE]
> +alternative_else_nop_endif
> +
>          /*
>           * Registers that may be useful after this macro is invoked:
>           *
> @@ -269,6 +275,13 @@ alternative_else_nop_endif
>          /* No need to restore UAO, it will be restored from SPSR_EL1 */
>          .endif
>
> +        /* Restore pmr */
> +alternative_if ARM64_HAS_IRQ_PRIO_MASKING
> +        ldr x20, [sp, #S_PMR_SAVE]
> +        msr_s SYS_ICC_PMR_EL1, x20
> +        dsb sy
> +alternative_else_nop_endif

What's this DSB for? If it's needed, please add a comment.

I would have expected an ISB (or none at all as we are going to return
from an exception).
On 04/12/18 17:09, Catalin Marinas wrote:
> On Mon, Nov 12, 2018 at 11:56:58AM +0000, Julien Thierry wrote:
>> [...]
>> +        /* Restore pmr */
>> +alternative_if ARM64_HAS_IRQ_PRIO_MASKING
>> +        ldr x20, [sp, #S_PMR_SAVE]
>> +        msr_s SYS_ICC_PMR_EL1, x20
>> +        dsb sy
>> +alternative_else_nop_endif
>
> What's this DSB for? If it's needed, please add a comment.

The DSB is to make sure that, in the case we are unmasking interrupt
priorities, the unmasking is seen by the redistributor. Without it, the
redistributor might only start forwarding interrupts (if their priorities
are too low) to the CPU once it has seen that PMR was modified, which
could happen after the CPU has returned from the exception.

I'll add a comment.

> I would have expected an ISB (or none at all as we are going to return
> from an exception).

The two reasons we don't need an ISB are:

- the only thing that matters is that the PMR modification + DSB happen
  before the exception return;

- writes to ICC_PMR_EL1 are self-synchronizing, so we don't need an ISB
  before the DSB.

Thanks,
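Expressed in C, the ordering described above is just a PMR write followed by
a DSB. A sketch with an assumed helper name follows (the actual patch does
this in entry.S assembly; write_sysreg_s() and dsb() are the generic kernel
accessors):

/*
 * Sketch of the restore ordering discussed above (assumed helper name,
 * not the actual entry.S code):
 *  - writes to ICC_PMR_EL1 are self-synchronizing, so no ISB is needed;
 *  - the DSB ensures the redistributor observes the new PMR value, and
 *    starts forwarding newly-unmasked interrupts, before the ERET.
 */
static inline void pmr_restore_before_eret(u64 pmr)
{
        write_sysreg_s(pmr, SYS_ICC_PMR_EL1);
        dsb(sy);
}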
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 6b0d4df..b2315ef 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -168,6 +168,9 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
         memset(regs, 0, sizeof(*regs));
         forget_syscall(regs);
         regs->pc = pc;
+
+        if (system_supports_irq_prio_masking())
+                regs->pmr_save = GIC_PRIO_IRQON;
 }

 static inline void start_thread(struct pt_regs *regs, unsigned long pc,
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index ce6998c..0ad46f5 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -19,6 +19,8 @@
 #ifndef __ASM_PTRACE_H
 #define __ASM_PTRACE_H

+#include <asm/cpufeature.h>
+
 #include <uapi/asm/ptrace.h>

 /* Current Exception Level values, as contained in CurrentEL */
@@ -173,7 +175,8 @@ struct pt_regs {
 #endif

         u64 orig_addr_limit;
-        u64 unused;     // maintain 16 byte alignment
+        /* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */
+        u64 pmr_save;
         u64 stackframe[2];
 };

@@ -208,8 +211,13 @@ static inline void forget_syscall(struct pt_regs *regs)
 #define processor_mode(regs) \
         ((regs)->pstate & PSR_MODE_MASK)

-#define interrupts_enabled(regs) \
-        (!((regs)->pstate & PSR_I_BIT))
+#define irqs_priority_unmasked(regs)                    \
+        (system_supports_irq_prio_masking() ?           \
+                (regs)->pmr_save & GIC_PRIO_STATUS_BIT : \
+                true)
+
+#define interrupts_enabled(regs) \
+        (!((regs)->pstate & PSR_I_BIT) && irqs_priority_unmasked(regs))

 #define fast_interrupts_enabled(regs) \
         (!((regs)->pstate & PSR_F_BIT))
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 323aeb5..bab4122 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -78,6 +78,7 @@ int main(void)
   DEFINE(S_ORIG_X0,             offsetof(struct pt_regs, orig_x0));
   DEFINE(S_SYSCALLNO,           offsetof(struct pt_regs, syscallno));
   DEFINE(S_ORIG_ADDR_LIMIT,     offsetof(struct pt_regs, orig_addr_limit));
+  DEFINE(S_PMR_SAVE,            offsetof(struct pt_regs, pmr_save));
   DEFINE(S_STACKFRAME,          offsetof(struct pt_regs, stackframe));
   DEFINE(S_FRAME_SIZE,          sizeof(struct pt_regs));
   BLANK();
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 039144e..eb8120e 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -249,6 +249,12 @@ alternative_else_nop_endif
         msr sp_el0, tsk
         .endif

+        /* Save pmr */
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+        mrs_s x20, SYS_ICC_PMR_EL1
+        str x20, [sp, #S_PMR_SAVE]
+alternative_else_nop_endif
+
         /*
          * Registers that may be useful after this macro is invoked:
          *
@@ -269,6 +275,13 @@ alternative_else_nop_endif
         /* No need to restore UAO, it will be restored from SPSR_EL1 */
         .endif

+        /* Restore pmr */
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+        ldr x20, [sp, #S_PMR_SAVE]
+        msr_s SYS_ICC_PMR_EL1, x20
+        dsb sy
+alternative_else_nop_endif
+
         ldp x21, x22, [sp, #S_PC]       // load ELR, SPSR
         .if \el == 0
         ct_user_enter
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index d9a4c2d..71e8850 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -231,6 +231,9 @@ void __show_regs(struct pt_regs *regs)

         printk("sp : %016llx\n", sp);

+        if (system_supports_irq_prio_masking())
+                printk("pmr_save: %08llx\n", regs->pmr_save);
+
         i = top_reg;

         while (i >= 0) {
@@ -362,6 +365,9 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
                 if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
                         childregs->pstate |= PSR_SSBS_BIT;

+                if (system_supports_irq_prio_masking())
+                        childregs->pmr_save = GIC_PRIO_IRQON;
+
                 p->thread.cpu_context.x19 = stack_start;
                 p->thread.cpu_context.x20 = stk_sz;
         }
If ICC_PMR_EL1 is used to mask interrupts, its value should be
saved/restored whenever a task is context switched out/in or
gets an exception.

Add PMR to the registers to save in the pt_regs struct upon kernel entry,
and restore it before ERET. Also, initialize it to a sane value when
creating new tasks.

Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Dave Martin <Dave.Martin@arm.com>
---
 arch/arm64/include/asm/processor.h |  3 +++
 arch/arm64/include/asm/ptrace.h    | 14 +++++++++++---
 arch/arm64/kernel/asm-offsets.c    |  1 +
 arch/arm64/kernel/entry.S          | 13 +++++++++++++
 arch/arm64/kernel/process.c        |  6 ++++++
 5 files changed, 34 insertions(+), 3 deletions(-)