
[v2,09/25] arm64: entry: Map the FIQ vector to IRQ on NEEDS_FIQ platforms

Message ID 20210215121713.57687-10-marcan@marcan.st (mailing list archive)
State New, archived
Series Apple M1 SoC platform bring-up

Commit Message

Hector Martin Feb. 15, 2021, 12:16 p.m. UTC
From: Marc Zyngier <maz@kernel.org>

By default, FIQ exceptions trigger a panic. On platforms that need to
deliver interrupts via FIQ, this gets redirected via an alternative to
instead handle FIQ the same way as IRQ. It is up to the irqchip handler
to discriminate between the two.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Hector Martin <marcan@marcan.st>
---
 arch/arm64/kernel/entry.S | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)
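
Since the commit message leaves IRQ/FIQ discrimination to the irqchip handler, here is a minimal hypothetical sketch (not part of this series) of how a shared root handler could tell the two apart by reading ISR_EL1; the my_irqchip_* names are made-up placeholders:

/*
 * With FIQ aliased onto the IRQ vector, the root handler can read
 * ISR_EL1: bit 6 (F) is set while a physical FIQ is pending and
 * bit 7 (I) while a physical IRQ is pending.
 */
#include <linux/bits.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

#define ISR_EL1_F	BIT(6)		/* local convenience define */

static void my_irqchip_handle_irq_sources(struct pt_regs *regs);	/* hypothetical */
static void my_irqchip_handle_fiq_sources(struct pt_regs *regs);	/* hypothetical */

static void my_irqchip_handle_irq(struct pt_regs *regs)
{
	if (read_sysreg(isr_el1) & ISR_EL1_F)
		my_irqchip_handle_fiq_sources(regs);
	else
		my_irqchip_handle_irq_sources(regs);
}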

Comments

Mark Rutland Feb. 17, 2021, 11:49 a.m. UTC | #1
Hi Hector,

On Mon, Feb 15, 2021 at 09:16:57PM +0900, Hector Martin wrote:
> From: Marc Zyngier <maz@kernel.org>
> 
> By default, FIQ exceptions trigger a panic. On platforms that need to
> deliver interrupts via FIQ, this gets redirected via an alternative to
> instead handle FIQ the same way as IRQ. It is up to the irqchip handler
> to discriminate between the two.
> 
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> Signed-off-by: Hector Martin <marcan@marcan.st>

Since the use of FIQ is a platform integration detail rather than a CPU
implementation detail (and e.g. can differ across bare-metal and VM),
I'd prefer to always have separate registered handlers for IRQ/FIQ (also
avoiding the need for patching). That way we can explicitly opt-in to
FIQ when required, and avoid edge-cases where an unexpected FIQ could
livelock an unaware IRQ handler.
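
A rough sketch of that opt-in model (assuming a set_handle_fiq() counterpart to set_handle_irq(), as in the branch linked below; the my_irqchip_* names are placeholders):

#include <linux/init.h>
#include <linux/irq.h>
#include <asm/ptrace.h>

static void my_irqchip_handle_irq(struct pt_regs *regs)
{
	/* dispatch IRQ sources */
}

static void my_irqchip_handle_fiq(struct pt_regs *regs)
{
	/* dispatch FIQ sources */
}

static void __init my_irqchip_register_handlers(bool platform_uses_fiq)
{
	set_handle_irq(my_irqchip_handle_irq);
	/*
	 * Only opt in to FIQ where the platform actually delivers it,
	 * so an unexpected FIQ still lands in the default handler.
	 */
	if (platform_uses_fiq)
		set_handle_fiq(my_irqchip_handle_fiq);
}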

Marc and I had a quick play with that, and I have a series of patches
I've pushed to:

https://git.kernel.org/pub/scm/linux/kernel/git/mark/linux.git/log/?h=arm64/fiq
git://git.kernel.org/pub/scm/linux/kernel/git/mark/linux.git arm64/fiq

... which I'll post out shortly.

Thanks,
Mark.

> ---
>  arch/arm64/kernel/entry.S | 21 ++++++++++++++++++---
>  1 file changed, 18 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
> index ba5f9aa379ce..bcfd1ac72636 100644
> --- a/arch/arm64/kernel/entry.S
> +++ b/arch/arm64/kernel/entry.S
> @@ -547,18 +547,18 @@ SYM_CODE_START(vectors)
>  
>  	kernel_ventry	1, sync				// Synchronous EL1h
>  	kernel_ventry	1, irq				// IRQ EL1h
> -	kernel_ventry	1, fiq_invalid			// FIQ EL1h
> +	kernel_ventry	1, fiq				// FIQ EL1h
>  	kernel_ventry	1, error			// Error EL1h
>  
>  	kernel_ventry	0, sync				// Synchronous 64-bit EL0
>  	kernel_ventry	0, irq				// IRQ 64-bit EL0
> -	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
> +	kernel_ventry	0, fiq				// FIQ 64-bit EL0
>  	kernel_ventry	0, error			// Error 64-bit EL0
>  
>  #ifdef CONFIG_COMPAT
>  	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
>  	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
> -	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
> +	kernel_ventry	0, fiq_compat, 32		// FIQ 32-bit EL0
>  	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
>  #else
>  	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
> @@ -658,6 +658,10 @@ SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
>  SYM_CODE_END(el1_sync)
>  
>  	.align	6
> +SYM_CODE_START_LOCAL_NOALIGN(el1_fiq)
> +alternative_if_not ARM64_NEEDS_FIQ
> +	b	el1_fiq_invalid
> +alternative_else_nop_endif
>  SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
>  	kernel_entry 1
>  	gic_prio_irq_setup pmr=x20, tmp=x1
> @@ -688,6 +692,7 @@ alternative_else_nop_endif
>  
>  	kernel_exit 1
>  SYM_CODE_END(el1_irq)
> +SYM_CODE_END(el1_fiq)
>  
>  /*
>   * EL0 mode handlers.
> @@ -710,10 +715,15 @@ SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
>  SYM_CODE_END(el0_sync_compat)
>  
>  	.align	6
> +SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat)
> +alternative_if_not ARM64_NEEDS_FIQ
> +	b	el0_fiq_invalid_compat
> +alternative_else_nop_endif
>  SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
>  	kernel_entry 0, 32
>  	b	el0_irq_naked
>  SYM_CODE_END(el0_irq_compat)
> +SYM_CODE_END(el0_fiq_compat)
>  
>  SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
>  	kernel_entry 0, 32
> @@ -722,6 +732,10 @@ SYM_CODE_END(el0_error_compat)
>  #endif
>  
>  	.align	6
> +SYM_CODE_START_LOCAL_NOALIGN(el0_fiq)
> +alternative_if_not ARM64_NEEDS_FIQ
> +	b	el0_fiq_invalid
> +alternative_else_nop_endif
>  SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
>  	kernel_entry 0
>  el0_irq_naked:
> @@ -736,6 +750,7 @@ el0_irq_naked:
>  
>  	b	ret_to_user
>  SYM_CODE_END(el0_irq)
> +SYM_CODE_END(el0_fiq)
>  
>  SYM_CODE_START_LOCAL(el1_error)
>  	kernel_entry 1
> -- 
> 2.30.0
>
Marc Zyngier Feb. 17, 2021, 2:38 p.m. UTC | #2
On Wed, 17 Feb 2021 11:49:23 +0000,
Mark Rutland <mark.rutland@arm.com> wrote:
> 
> Hi Hector,
> 
> On Mon, Feb 15, 2021 at 09:16:57PM +0900, Hector Martin wrote:
> > From: Marc Zyngier <maz@kernel.org>
> > 
> > By default, FIQ exceptions trigger a panic. On platforms that need to
> > deliver interrupts via FIQ, this gets redirected via an alternative to
> > instead handle FIQ the same way as IRQ. It is up to the irqchip handler
> > to discriminate between the two.
> > 
> > Signed-off-by: Marc Zyngier <maz@kernel.org>
> > Signed-off-by: Hector Martin <marcan@marcan.st>
> 
> Since the use of FIQ is a platform integration detail rather than a CPU
> implementation detail (and e.g. can differ across bare-metal and VM),
> I'd prefer to always have separate registered handlers for IRQ/FIQ (also
> avoiding the need for patching). That way we can explicitly opt-in to
> FIQ when required, and avoid edge-cases where an unexpected FIQ could
> livelock an unaware IRQ handler.
> 
> Marc and I had a quick play with that, and I have a series of patches
> I've pushed to:
> 
> https://git.kernel.org/pub/scm/linux/kernel/git/mark/linux.git/log/?h=arm64/fiq
> git://git.kernel.org/pub/scm/linux/kernel/git/mark/linux.git arm64/fiq
> 
> ... which I'll post out shortly.

FWIW, I've just posted a more complete version of the first patch in
this series[1], which you may want to use directly (though I plan to
take it as a fix for 5.12).

Thanks,

	M.

[1] https://lore.kernel.org/r/20210217142800.2547737-1-maz@kernel.org

Patch

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index ba5f9aa379ce..bcfd1ac72636 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -547,18 +547,18 @@  SYM_CODE_START(vectors)
 
 	kernel_ventry	1, sync				// Synchronous EL1h
 	kernel_ventry	1, irq				// IRQ EL1h
-	kernel_ventry	1, fiq_invalid			// FIQ EL1h
+	kernel_ventry	1, fiq				// FIQ EL1h
 	kernel_ventry	1, error			// Error EL1h
 
 	kernel_ventry	0, sync				// Synchronous 64-bit EL0
 	kernel_ventry	0, irq				// IRQ 64-bit EL0
-	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
+	kernel_ventry	0, fiq				// FIQ 64-bit EL0
 	kernel_ventry	0, error			// Error 64-bit EL0
 
 #ifdef CONFIG_COMPAT
 	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
 	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
-	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
+	kernel_ventry	0, fiq_compat, 32		// FIQ 32-bit EL0
 	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
 #else
 	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
@@ -658,6 +658,10 @@  SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
 SYM_CODE_END(el1_sync)
 
 	.align	6
+SYM_CODE_START_LOCAL_NOALIGN(el1_fiq)
+alternative_if_not ARM64_NEEDS_FIQ
+	b	el1_fiq_invalid
+alternative_else_nop_endif
 SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
 	kernel_entry 1
 	gic_prio_irq_setup pmr=x20, tmp=x1
@@ -688,6 +692,7 @@  alternative_else_nop_endif
 
 	kernel_exit 1
 SYM_CODE_END(el1_irq)
+SYM_CODE_END(el1_fiq)
 
 /*
  * EL0 mode handlers.
@@ -710,10 +715,15 @@  SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
 SYM_CODE_END(el0_sync_compat)
 
 	.align	6
+SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat)
+alternative_if_not ARM64_NEEDS_FIQ
+	b	el0_fiq_invalid_compat
+alternative_else_nop_endif
 SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
 	kernel_entry 0, 32
 	b	el0_irq_naked
 SYM_CODE_END(el0_irq_compat)
+SYM_CODE_END(el0_fiq_compat)
 
 SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
 	kernel_entry 0, 32
@@ -722,6 +732,10 @@  SYM_CODE_END(el0_error_compat)
 #endif
 
 	.align	6
+SYM_CODE_START_LOCAL_NOALIGN(el0_fiq)
+alternative_if_not ARM64_NEEDS_FIQ
+	b	el0_fiq_invalid
+alternative_else_nop_endif
 SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
 	kernel_entry 0
 el0_irq_naked:
@@ -736,6 +750,7 @@  el0_irq_naked:
 
 	b	ret_to_user
 SYM_CODE_END(el0_irq)
+SYM_CODE_END(el0_fiq)
 
 SYM_CODE_START_LOCAL(el1_error)
 	kernel_entry 1