
[04/17] arm64: entry: move preempt logic to C

Message ID 20200108185634.1163-5-mark.rutland@arm.com (mailing list archive)
State New, archived
Series arm64: entry deasmification and cleanup

Commit Message

Mark Rutland Jan. 8, 2020, 6:56 p.m. UTC
Some of our preemption logic is in C, while a portion of it is in
assembly. Let's pull the remainder of it into C so that it lives in one
place.

At the same time, let's improve the comments regarding NMI preemption to
make it clearer why we cannot preempt from NMIs.

Subsequent patches will convert the caller of el1_preempt() to C.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Julien Thierry <julien.thierry.kdev@gmail.com>
Cc: Will Deacon <will@kernel.org>
---
 arch/arm64/kernel/entry-common.c | 18 +++++++++++++++++-
 arch/arm64/kernel/entry.S        | 13 +------------
 2 files changed, 18 insertions(+), 13 deletions(-)
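As a rough illustration of where the series is heading, the asm caller is
eventually to be replaced by a C handler along these lines (a sketch only;
el1_irq_handler() and do_interrupt_handler() are assumed names, not taken
from this patch):

/*
 * Hypothetical sketch of the eventual C caller of el1_preempt(). With the
 * IS_ENABLED(CONFIG_PREEMPT) check inside el1_preempt(), the call can be
 * made unconditionally; the compiler discards the body on !PREEMPT builds.
 */
asmlinkage void notrace el1_irq_handler(struct pt_regs *regs)
{
	do_interrupt_handler(regs);	/* assumed IRQ dispatch hook */
	el1_preempt();
}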

Comments

Anshuman Khandual Jan. 9, 2020, 6:43 a.m. UTC | #1
On 01/09/2020 12:26 AM, Mark Rutland wrote:
> Some of our preemption logic is in C, while a portion of it is in
> assembly. Let's pull the remainder of it into C so that it lives in one
> place.
> 
> At the same time, let's improve the comments regarding NMI preemption to
> make it clearer why we cannot preempt from NMIs.
> 
> Subsequent patches will convert the caller of el1_preempt() to C.
> 
> Signed-off-by: Mark Rutland <mark.rutland@arm.com>
> Cc: Catalin Marinas <catalin.marinas@arm.com>
> Cc: James Morse <james.morse@arm.com>
> Cc: Julien Thierry <julien.thierry.kdev@gmail.com>
> Cc: Will Deacon <will@kernel.org>
> ---
>  arch/arm64/kernel/entry-common.c | 18 +++++++++++++++++-
>  arch/arm64/kernel/entry.S        | 13 +------------
>  2 files changed, 18 insertions(+), 13 deletions(-)
> 
> diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
> index 4fa058453468..b93ca2148a20 100644
> --- a/arch/arm64/kernel/entry-common.c
> +++ b/arch/arm64/kernel/entry-common.c
> @@ -8,6 +8,7 @@
>  #include <linux/context_tracking.h>
>  #include <linux/linkage.h>
>  #include <linux/lockdep.h>
> +#include <linux/preempt.h>
>  #include <linux/ptrace.h>
>  #include <linux/sched/debug.h>
>  #include <linux/thread_info.h>
> @@ -334,8 +335,23 @@ asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
>  NOKPROBE_SYMBOL(el0_sync_compat_handler);
>  #endif /* CONFIG_COMPAT */
>  
> -asmlinkage void __sched arm64_preempt_schedule_irq(void)
> +asmlinkage void __sched el1_preempt(void)
>  {
> +	if (!IS_ENABLED(CONFIG_PREEMPT) || preempt_count())
> +		return;

IS_ENABLED(CONFIG_PREEMPT) is not really required here, as the single
call site for el1_preempt() is still wrapped in #ifdef CONFIG_PREEMPT,
so we only need to retain one of the two checks.
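Concretely, the suggested variant would look something like this (a
sketch of the suggestion, not code from the patch):

asmlinkage void __sched el1_preempt(void)
{
	/*
	 * No IS_ENABLED(CONFIG_PREEMPT) check: the only call site in
	 * entry.S is already guarded by #ifdef CONFIG_PREEMPT.
	 */
	if (preempt_count())
		return;

	/* ... NMI check and preemption, as in the patch ... */
}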

> +
> +	/*
> +	 * To avoid nesting NMIs and overflowing the stack, we must leave NMIs
> +	 * masked until the exception return. We want to context-switch with
> +	 * IRQs masked but NMIs enabled, so cannot preempt an NMI.
> +	 *
> +	 * PSTATE.{D,A,F} are cleared for IRQ and NMI by el1_irq().
> +	 * When gic_handle_irq() handles an NMI, it leaves PSTATE.I set.
> +	 * If anything is set in DAIF, this is an NMI.
> +	 */
> +	if (system_uses_irq_prio_masking() && read_sysreg(daif) != 0)

If the IS_ENABLED(CONFIG_PREEMPT) check above gets dropped, preempt_count()
can be moved into this condition as well.
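That is, the two early returns would collapse into a single check,
roughly (again a sketch of the suggestion):

	if (preempt_count() ||
	    (system_uses_irq_prio_masking() && read_sysreg(daif) != 0))
		return;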

> +		return;
> +
>  	lockdep_assert_irqs_disabled();
>  
>  	/*
> diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
> index 7c6a0a41676f..53ce1877a4aa 100644
> --- a/arch/arm64/kernel/entry.S
> +++ b/arch/arm64/kernel/entry.S
> @@ -606,18 +606,7 @@ el1_irq:
>  	irq_handler
>  
>  #ifdef CONFIG_PREEMPT
> -	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
> -alternative_if ARM64_HAS_IRQ_PRIO_MASKING
> -	/*
> -	 * DA_F were cleared at start of handling. If anything is set in DAIF,
> -	 * we come back from an NMI, so skip preemption
> -	 */
> -	mrs	x0, daif
> -	orr	x24, x24, x0
> -alternative_else_nop_endif
> -	cbnz	x24, 1f				// preempt count != 0 || NMI return path
> -	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
> -1:
> +	bl	el1_preempt
>  #endif
>  
>  #ifdef CONFIG_ARM64_PSEUDO_NMI
>
Mark Rutland Jan. 9, 2020, 12:22 p.m. UTC | #2
On Thu, Jan 09, 2020 at 12:13:13PM +0530, Anshuman Khandual wrote:
> On 01/09/2020 12:26 AM, Mark Rutland wrote:
> > Some of our preemption logic is in C, while a portion of it is in
> > assembly. Let's pull the remainder of it into C so that it lives in one
> > place.
> > 
> > At the same time, let's improve the comments regarding NMI preemption to
> > make it clearer why we cannot preempt from NMIs.
> > 
> > Subsequent patches will convert the caller of el1_preempt() to C.
> > 
> > Signed-off-by: Mark Rutland <mark.rutland@arm.com>
> > Cc: Catalin Marinas <catalin.marinas@arm.com>
> > Cc: James Morse <james.morse@arm.com>
> > Cc: Julien Thierry <julien.thierry.kdev@gmail.com>
> > Cc: Will Deacon <will@kernel.org>
> > ---
> >  arch/arm64/kernel/entry-common.c | 18 +++++++++++++++++-
> >  arch/arm64/kernel/entry.S        | 13 +------------
> >  2 files changed, 18 insertions(+), 13 deletions(-)
> > 
> > diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
> > index 4fa058453468..b93ca2148a20 100644
> > --- a/arch/arm64/kernel/entry-common.c
> > +++ b/arch/arm64/kernel/entry-common.c
> > @@ -8,6 +8,7 @@
> >  #include <linux/context_tracking.h>
> >  #include <linux/linkage.h>
> >  #include <linux/lockdep.h>
> > +#include <linux/preempt.h>
> >  #include <linux/ptrace.h>
> >  #include <linux/sched/debug.h>
> >  #include <linux/thread_info.h>
> > @@ -334,8 +335,23 @@ asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
> >  NOKPROBE_SYMBOL(el0_sync_compat_handler);
> >  #endif /* CONFIG_COMPAT */
> >  
> > -asmlinkage void __sched arm64_preempt_schedule_irq(void)
> > +asmlinkage void __sched el1_preempt(void)
> >  {
> > +	if (!IS_ENABLED(CONFIG_PREEMPT) || preempt_count())
> > +		return;
> 
> IS_ENABLED(CONFIG_PREEMPT) is not really required here, as the single
> call site for el1_preempt() is still wrapped in #ifdef CONFIG_PREEMPT,
> so we only need to retain one of the two checks.

Using both was deliberate.

I wanted to avoid ifdeffery here, but also wanted to avoid the
unnecessary bits being built for !CONFIG_PREEMPT builds, in both this
function and the caller.

In a subsequent patch this'll be made static and called unconditionally,
at which point we'll definitely need the IS_ENABLED() check. I'll update
the commit message to make this explicit.
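Based on that description, the later shape is presumably something like
the following (a sketch, not the actual follow-up patch):

/*
 * Sketch: once static and called unconditionally from C, the
 * IS_ENABLED() check is what lets the compiler drop the body
 * entirely on !CONFIG_PREEMPT builds.
 */
static void __sched el1_preempt(void)
{
	if (!IS_ENABLED(CONFIG_PREEMPT) || preempt_count())
		return;

	/* ... NMI check and preempt_schedule_irq(), as in this patch ... */
}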

> 
> > +
> > +	/*
> > +	 * To avoid nesting NMIs and overflowing the stack, we must leave NMIs
> > +	 * masked until the exception return. We want to context-switch with
> > +	 * IRQs masked but NMIs enabled, so cannot preempt an NMI.
> > +	 *
> > +	 * PSTATE.{D,A,F} are cleared for IRQ and NMI by el1_irq().
> > +	 * When gic_handle_irq() handles an NMI, it leaves PSTATE.I set.
> > +	 * If anything is set in DAIF, this is an NMI.
> > +	 */
> > +	if (system_uses_irq_prio_masking() && read_sysreg(daif) != 0)
> 
> If the IS_ENABLED(CONFIG_PREEMPT) check above gets dropped, preempt_count()
> can be moved into this condition as well.

As above, I'm going to leave this as-is.

I also think it's worth keeping this separate from the preempt_count()
early return, since the comment here is specific to the GIC priority
masking logic.

Thanks,
Mark.

> 
> > +		return;
> > +
> >  	lockdep_assert_irqs_disabled();
> >  
> >  	/*
> > diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
> > index 7c6a0a41676f..53ce1877a4aa 100644
> > --- a/arch/arm64/kernel/entry.S
> > +++ b/arch/arm64/kernel/entry.S
> > @@ -606,18 +606,7 @@ el1_irq:
> >  	irq_handler
> >  
> >  #ifdef CONFIG_PREEMPT
> > -	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
> > -alternative_if ARM64_HAS_IRQ_PRIO_MASKING
> > -	/*
> > -	 * DA_F were cleared at start of handling. If anything is set in DAIF,
> > -	 * we come back from an NMI, so skip preemption
> > -	 */
> > -	mrs	x0, daif
> > -	orr	x24, x24, x0
> > -alternative_else_nop_endif
> > -	cbnz	x24, 1f				// preempt count != 0 || NMI return path
> > -	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
> > -1:
> > +	bl	el1_preempt
> >  #endif
> >  
> >  #ifdef CONFIG_ARM64_PSEUDO_NMI
> >

Patch

diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 4fa058453468..b93ca2148a20 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -8,6 +8,7 @@ 
 #include <linux/context_tracking.h>
 #include <linux/linkage.h>
 #include <linux/lockdep.h>
+#include <linux/preempt.h>
 #include <linux/ptrace.h>
 #include <linux/sched/debug.h>
 #include <linux/thread_info.h>
@@ -334,8 +335,23 @@  asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
 NOKPROBE_SYMBOL(el0_sync_compat_handler);
 #endif /* CONFIG_COMPAT */
 
-asmlinkage void __sched arm64_preempt_schedule_irq(void)
+asmlinkage void __sched el1_preempt(void)
 {
+	if (!IS_ENABLED(CONFIG_PREEMPT) || preempt_count())
+		return;
+
+	/*
+	 * To avoid nesting NMIs and overflowing the stack, we must leave NMIs
+	 * masked until the exception return. We want to context-switch with
+	 * IRQs masked but NMIs enabled, so cannot preempt an NMI.
+	 *
+	 * PSTATE.{D,A,F} are cleared for IRQ and NMI by el1_irq().
+	 * When gic_handle_irq() handles an NMI, it leaves PSTATE.I set.
+	 * If anything is set in DAIF, this is an NMI.
+	 */
+	if (system_uses_irq_prio_masking() && read_sysreg(daif) != 0)
+		return;
+
 	lockdep_assert_irqs_disabled();
 
 	/*
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 7c6a0a41676f..53ce1877a4aa 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -606,18 +606,7 @@  el1_irq:
 	irq_handler
 
 #ifdef CONFIG_PREEMPT
-	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
-alternative_if ARM64_HAS_IRQ_PRIO_MASKING
-	/*
-	 * DA_F were cleared at start of handling. If anything is set in DAIF,
-	 * we come back from an NMI, so skip preemption
-	 */
-	mrs	x0, daif
-	orr	x24, x24, x0
-alternative_else_nop_endif
-	cbnz	x24, 1f				// preempt count != 0 || NMI return path
-	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
-1:
+	bl	el1_preempt
 #endif
 
 #ifdef CONFIG_ARM64_PSEUDO_NMI
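
For reference, the complete function after this patch reads roughly as
follows. The tail, from lockdep_assert_irqs_disabled() onward, is carried
over unchanged from the old arm64_preempt_schedule_irq(); the final
system_capabilities_finalized() gate is reproduced from that pre-existing
code rather than from this diff, so treat it as an assumption:

asmlinkage void __sched el1_preempt(void)
{
	/* All but the early return compiles away on !CONFIG_PREEMPT builds. */
	if (!IS_ENABLED(CONFIG_PREEMPT) || preempt_count())
		return;

	/*
	 * To avoid nesting NMIs and overflowing the stack, we must leave NMIs
	 * masked until the exception return. We want to context-switch with
	 * IRQs masked but NMIs enabled, so cannot preempt an NMI.
	 *
	 * If anything is set in DAIF here, this is an NMI: el1_irq() clears
	 * PSTATE.{D,A,F} for IRQ and NMI, and gic_handle_irq() leaves
	 * PSTATE.I set when handling an NMI.
	 */
	if (system_uses_irq_prio_masking() && read_sysreg(daif) != 0)
		return;

	lockdep_assert_irqs_disabled();

	/*
	 * Carried over from the pre-existing code: preempting from an IRQ
	 * leaves copies of PSTATE on the stack, and cpufeature enable calls
	 * may modify PSTATE, so only preempt once cpufeatures are finalized.
	 */
	if (system_capabilities_finalized())
		preempt_schedule_irq();
}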