
[RFC] arm64: irqflags: fix incomplete save & restore

Message ID 20190403152854.27741-1-liwei391@huawei.com (mailing list archive)
State RFC
Series [RFC] arm64: irqflags: fix incomplete save & restore

Commit Message

liwei (GF) April 3, 2019, 3:28 p.m. UTC
To support the arm64 pseudo-NMI, the functions arch_local_irq_save() and
arch_local_irq_restore() now operate on ICC_PMR_EL1 instead of daif.
But I found the logic of the save and restore to be suspicious:

arch_local_irq_save():
  daif.i_on   pmr_on   ->   flag.i_on
      1          0      |       0
      1          1      |       1
      0          1      |       0     --[1]
      0          0      |       0

arch_local_irq_restore():
  daif.i_on   pmr_on   <-   flag.i_on
      x          0      |       0
      x          1      |       1

As we can see, condition [1] will never be restored faithfully. When doing a
function_graph trace of gic_handle_irq(), the local_irq_save() and
local_irq_restore() calls in trace_graph_entry() hit exactly this
condition. The IRQ can therefore never be processed, leading to a hang.

In this patch, we save and restore the state exactly, and make sure that
arch_irqs_disabled_flags() returns the correct result.
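
For illustration, the resulting flags layout and the disabled check are
roughly equivalent to the following C (just a sketch; the real code stays
in inline asm):

	/*
	 * flags layout with this patch:
	 *   bits [63:32] : daif at save time
	 *   bits [31:0]  : ICC_PMR_EL1 at save time
	 */
	flags = (daif_bits << 32) | read_sysreg_s(SYS_ICC_PMR_EL1);

	/* disabled if PSR.I was set or if PMR was masking normal IRQs */
	disabled = ((flags >> 32) & PSR_I_BIT) ||
		   ((u32)flags <= GIC_PRIO_IRQOFF);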

Fixes: 4a503217ce37 ("arm64: irqflags: Use ICC_PMR_EL1 for interrupt masking")
Signed-off-by: Wei Li <liwei391@huawei.com>
---
 arch/arm64/include/asm/irqflags.h | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

Comments

Julien Thierry April 3, 2019, 4:30 p.m. UTC | #1
Hi Wei,

Thanks for looking into this.

On 03/04/2019 16:28, Wei Li wrote:
> To support the arm64 pseudo-NMI, the functions arch_local_irq_save() and
> arch_local_irq_restore() now operate on ICC_PMR_EL1 instead of daif.
> But I found the logic of the save and restore to be suspicious:
> 
> arch_local_irq_save():
>   daif.i_on   pmr_on   ->   flag.i_on
>       1          0      |       0
>       1          1      |       1
>       0          1      |       0     --[1]
>       0          0      |       0
> 
> arch_local_irq_restore():
>   daif.i_on   pmr_on   <-   flag.i_on
>       x          0      |       0
>       x          1      |       1
> 
> As we can see, condition [1] will never be restored faithfully. When doing a
> function_graph trace of gic_handle_irq(), the local_irq_save() and
> local_irq_restore() calls in trace_graph_entry() hit exactly this
> condition. The IRQ can therefore never be processed, leading to a hang.
> 
> In this patch, we save and restore the state exactly, and make sure that
> arch_irqs_disabled_flags() returns the correct result.

Thinking out loud:

So, with this patch we make sure that PMR is not sneakily altered when
restoring a state with PSR.I == 1 and PMR == GIC_PRIO_IRQON.

One thing that could be odd from such a state is:

	local_irq_save(flags); // arch_irqs_disabled_flags(flags) == true

	[...]

	write_sysreg_s(GIC_PRIO_IRQOFF, SYS_ICC_PMR_EL1);
	write_sysreg(read_sysreg(daif) & ~PSR_I_BIT, daif);

	[...]

	local_irq_restore(flags);

Here, restoring flags that indicated interrupts were disabled suddenly
enables interrupts.

But doing a save/restore around an operation that fiddles with PSR.I
doesn't make sense, especially when we are using PMR for normal
enable/disable.

I think your suggestion is better than what we have right now, but I'd
like to think about it a bit more.

In a previous version of the priority masking, PSR.I was both saved and
restored in the flags (almost like you do here, except that you don't
restore it). So I'm wondering whether we should just go back to that
option... (Maybe taking inspiration from your patch and just keeping all
the daif bits, stuffing them as they are into the upper 32 bits, since
both daif and PMR are 32-bit registers and the flags are 64 bits.)
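
Roughly, the restore path in that scheme could look like the following
(completely untested sketch, just to illustrate the idea):

	/* untested sketch: restore PMR from the low word, daif from the high word */
	write_sysreg_s((u32)flags, SYS_ICC_PMR_EL1);
	write_sysreg(flags >> 32, daif);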

Other people's opinions are welcome as well.

> 
> Fixes: 4a503217ce37 ("arm64: irqflags: Use ICC_PMR_EL1 for interrupt masking")
> Signed-off-by: Wei Li <liwei391@huawei.com>
> ---
>  arch/arm64/include/asm/irqflags.h | 17 +++++++++--------
>  1 file changed, 9 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
> index 43d8366c1e87..7bc6a79f31c4 100644
> --- a/arch/arm64/include/asm/irqflags.h
> +++ b/arch/arm64/include/asm/irqflags.h
> @@ -76,8 +76,7 @@ static inline unsigned long arch_local_save_flags(void)
>  	 * The asm is logically equivalent to:
>  	 *
>  	 * if (system_uses_irq_prio_masking())
> -	 *	flags = (daif_bits & PSR_I_BIT) ?
> -	 *			GIC_PRIO_IRQOFF :
> +	 *	flags = (daif_bits << 32) |
>  	 *			read_sysreg_s(SYS_ICC_PMR_EL1);
>  	 * else
>  	 *	flags = daif_bits;
> @@ -87,11 +86,11 @@ static inline unsigned long arch_local_save_flags(void)
>  			"nop\n"
>  			"nop",
>  			"mrs_s	%0, " __stringify(SYS_ICC_PMR_EL1) "\n"
> -			"ands	%1, %1, " __stringify(PSR_I_BIT) "\n"
> -			"csel	%0, %0, %2, eq",
> +			"lsl	%1, %1, #32\n"
> +			"orr	%0, %0, %1",
>  			ARM64_HAS_IRQ_PRIO_MASKING)
>  		: "=&r" (flags), "+r" (daif_bits)
> -		: "r" ((unsigned long) GIC_PRIO_IRQOFF)
> +		:
>  		: "memory");
>  
>  	return flags;
> @@ -119,8 +118,8 @@ static inline void arch_local_irq_restore(unsigned long flags)
>  			"msr_s	" __stringify(SYS_ICC_PMR_EL1) ", %0\n"
>  			"dsb	sy",
>  			ARM64_HAS_IRQ_PRIO_MASKING)
> -		: "+r" (flags)
>  		:
> +		: "r" ((int)flags)

Nit: If we go with that, can we cast this to "u32" or "uint32_t"?

>  		: "memory");
>  }
>  
> @@ -130,12 +129,14 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
>  
>  	asm volatile(ALTERNATIVE(
>  			"and	%w0, %w1, #" __stringify(PSR_I_BIT) "\n"
> +			"nop\n"
>  			"nop",
> +			"and	%w0, %w2, #" __stringify(PSR_I_BIT) "\n"
>  			"cmp	%w1, #" __stringify(GIC_PRIO_IRQOFF) "\n"
> -			"cset	%w0, ls",
> +			"cinc	%w0, %w0, ls",
>  			ARM64_HAS_IRQ_PRIO_MASKING)
>  		: "=&r" (res)
> -		: "r" ((int) flags)
> +		: "r" ((int) flags), "r" (flags >> 32)

Same.

>  		: "memory");
>  
>  	return res;
> 

Thanks,

Patch

diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 43d8366c1e87..7bc6a79f31c4 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -76,8 +76,7 @@  static inline unsigned long arch_local_save_flags(void)
 	 * The asm is logically equivalent to:
 	 *
 	 * if (system_uses_irq_prio_masking())
-	 *	flags = (daif_bits & PSR_I_BIT) ?
-	 *			GIC_PRIO_IRQOFF :
+	 *	flags = (daif_bits << 32) |
 	 *			read_sysreg_s(SYS_ICC_PMR_EL1);
 	 * else
 	 *	flags = daif_bits;
@@ -87,11 +86,11 @@  static inline unsigned long arch_local_save_flags(void)
 			"nop\n"
 			"nop",
 			"mrs_s	%0, " __stringify(SYS_ICC_PMR_EL1) "\n"
-			"ands	%1, %1, " __stringify(PSR_I_BIT) "\n"
-			"csel	%0, %0, %2, eq",
+			"lsl	%1, %1, #32\n"
+			"orr	%0, %0, %1",
 			ARM64_HAS_IRQ_PRIO_MASKING)
 		: "=&r" (flags), "+r" (daif_bits)
-		: "r" ((unsigned long) GIC_PRIO_IRQOFF)
+		:
 		: "memory");
 
 	return flags;
@@ -119,8 +118,8 @@  static inline void arch_local_irq_restore(unsigned long flags)
 			"msr_s	" __stringify(SYS_ICC_PMR_EL1) ", %0\n"
 			"dsb	sy",
 			ARM64_HAS_IRQ_PRIO_MASKING)
-		: "+r" (flags)
 		:
+		: "r" ((int)flags)
 		: "memory");
 }
 
@@ -130,12 +129,14 @@  static inline int arch_irqs_disabled_flags(unsigned long flags)
 
 	asm volatile(ALTERNATIVE(
 			"and	%w0, %w1, #" __stringify(PSR_I_BIT) "\n"
+			"nop\n"
 			"nop",
+			"and	%w0, %w2, #" __stringify(PSR_I_BIT) "\n"
 			"cmp	%w1, #" __stringify(GIC_PRIO_IRQOFF) "\n"
-			"cset	%w0, ls",
+			"cinc	%w0, %w0, ls",
 			ARM64_HAS_IRQ_PRIO_MASKING)
 		: "=&r" (res)
-		: "r" ((int) flags)
+		: "r" ((int) flags), "r" (flags >> 32)
 		: "memory");
 
 	return res;