[v6,10/24] arm64: irqflags: Use ICC_PMR_EL1 for interrupt masking

Message ID 1542023835-21446-11-git-send-email-julien.thierry@arm.com (mailing list archive)
State New, archived
Series arm64: provide pseudo NMI with GICv3

Commit Message

Julien Thierry Nov. 12, 2018, 11:57 a.m. UTC
Instead of disabling interrupts by setting the PSR.I bit, use a priority
higher than the one used for interrupts to mask them via PMR.

The value chosen for PMR to enable/disable interrupts encodes the status
of interrupts in a single bit. This information is stored in the irqflags
values used when saving/restoring IRQ status.
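
As an illustration of this encoding, here is a stand-alone sketch (not kernel
code) that exercises the MAKE_ARCH_FLAGS/ARCH_FLAGS_GET_* macros from the
patch. The GIC_PRIO_* values below are assumptions chosen only to be
self-consistent with the macros; the real definitions come from earlier
patches in the series.

/*
 * Stand-alone sketch of the single-bit PMR status encoding described
 * above. GIC_PRIO_* values are assumed for illustration.
 */
#include <assert.h>
#include <stdio.h>

#define PSR_I_BIT		0x00000080
#define GIC_PRIO_STATUS_SHIFT	6				/* assumed */
#define GIC_PRIO_STATUS_BIT	(1 << GIC_PRIO_STATUS_SHIFT)
#define GIC_PRIO_IRQOFF		0xb0				/* assumed */
#define GIC_PRIO_IRQON		(GIC_PRIO_IRQOFF | GIC_PRIO_STATUS_BIT)

#define ARCH_FLAG_PMR_EN 0x1

#define MAKE_ARCH_FLAGS(daif, pmr)					\
	((daif) | (((pmr) >> GIC_PRIO_STATUS_SHIFT) & ARCH_FLAG_PMR_EN))

#define ARCH_FLAGS_GET_PMR(flags)				 \
	((((flags) & ARCH_FLAG_PMR_EN) << GIC_PRIO_STATUS_SHIFT) \
		| GIC_PRIO_IRQOFF)

#define ARCH_FLAGS_GET_DAIF(flags) ((flags) & ~ARCH_FLAG_PMR_EN)

int main(void)
{
	/* IRQs unmasked via PMR: bit 0 of the saved flags ends up set. */
	unsigned long on = MAKE_ARCH_FLAGS(0, GIC_PRIO_IRQON);
	/* IRQs masked via PMR: bit 0 of the saved flags ends up clear. */
	unsigned long off = MAKE_ARCH_FLAGS(0, GIC_PRIO_IRQOFF);

	/* Round-trip: the original PMR and daif values are recoverable. */
	assert(ARCH_FLAGS_GET_PMR(on) == GIC_PRIO_IRQON);
	assert(ARCH_FLAGS_GET_PMR(off) == GIC_PRIO_IRQOFF);
	assert(ARCH_FLAGS_GET_DAIF(on | PSR_I_BIT) == PSR_I_BIT);

	printf("on=%#lx off=%#lx\n", on, off);
	return 0;
}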

Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Suggested-by: Daniel Thompson <daniel.thompson@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Oleg Nesterov <oleg@redhat.com>
---
 arch/arm64/include/asm/efi.h      |   3 +-
 arch/arm64/include/asm/irqflags.h | 132 +++++++++++++++++++++++++++++---------
 2 files changed, 105 insertions(+), 30 deletions(-)

Comments

Catalin Marinas Dec. 4, 2018, 5:36 p.m. UTC | #1
On Mon, Nov 12, 2018 at 11:57:01AM +0000, Julien Thierry wrote:
> diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
> index 24692ed..e0a32e4 100644
> --- a/arch/arm64/include/asm/irqflags.h
> +++ b/arch/arm64/include/asm/irqflags.h
> @@ -18,7 +18,27 @@
>  
>  #ifdef __KERNEL__
>  
> +#include <asm/alternative.h>
> +#include <asm/cpufeature.h>
>  #include <asm/ptrace.h>
> +#include <asm/sysreg.h>
> +
> +
> +/*
> + * When ICC_PMR_EL1 is used for interrupt masking, only the bit indicating
> + * whether the normal interrupts are masked is kept along with the daif
> + * flags.
> + */
> +#define ARCH_FLAG_PMR_EN 0x1
> +
> +#define MAKE_ARCH_FLAGS(daif, pmr)					\
> +	((daif) | (((pmr) >> GIC_PRIO_STATUS_SHIFT) & ARCH_FLAG_PMR_EN))
> +
> +#define ARCH_FLAGS_GET_PMR(flags)				 \
> +	((((flags) & ARCH_FLAG_PMR_EN) << GIC_PRIO_STATUS_SHIFT) \
> +		| GIC_PRIO_IRQOFF)
> +
> +#define ARCH_FLAGS_GET_DAIF(flags) ((flags) & ~ARCH_FLAG_PMR_EN)

I wonder whether we could just use the PSR_I_BIT here to decide whether
to set the GIC_PRIO_IRQ{ON,OFF}. We could clear the PSR_I_BIT in
_restore_daif() with an alternative.
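
A rough sketch of that idea, purely for illustration and not a tested
implementation: cpus_have_const_cap() stands in for the alternative, the
saved flags carry PSR.I, and PSR.I is dropped on restore when priority
masking is in use.

/*
 * Illustrative sketch only: derive the PMR value from the saved PSR.I
 * bit instead of keeping a separate ARCH_FLAG_PMR_EN bit.
 */
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

static inline void arch_local_irq_restore_sketch(unsigned long flags)
{
	if (cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING)) {
		/* PSR.I set in the saved flags means "IRQs masked". */
		write_sysreg_s((flags & PSR_I_BIT) ? GIC_PRIO_IRQOFF
						   : GIC_PRIO_IRQON,
			       SYS_ICC_PMR_EL1);
		dsb(sy);
		/* The "clear PSR_I_BIT with an alternative" step. */
		flags &= ~PSR_I_BIT;
	}

	asm volatile("msr	daif, %0" : : "r" (flags) : "memory");
}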

> +/*
> + * CPU interrupt mask handling.
> + */
>  static inline void arch_local_irq_enable(void)
>  {
> -	asm volatile(
> -		"msr	daifclr, #2		// arch_local_irq_enable"
> -		:
> +	unsigned long unmasked = GIC_PRIO_IRQON;
> +
> +	asm volatile(ALTERNATIVE(
> +		"msr	daifclr, #2		// arch_local_irq_enable\n"
> +		"nop",
> +		"msr_s  " __stringify(SYS_ICC_PMR_EL1) ",%0\n"
> +		"dsb	sy",
> +		ARM64_HAS_IRQ_PRIO_MASKING)

DSB needed here as well? I guess I'd have to read the GIC spec before
asking again ;).
Julien Thierry Dec. 5, 2018, 4:55 p.m. UTC | #2
On 04/12/18 17:36, Catalin Marinas wrote:
> On Mon, Nov 12, 2018 at 11:57:01AM +0000, Julien Thierry wrote:
>> diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
>> index 24692ed..e0a32e4 100644
>> --- a/arch/arm64/include/asm/irqflags.h
>> +++ b/arch/arm64/include/asm/irqflags.h
>> @@ -18,7 +18,27 @@
>>  
>>  #ifdef __KERNEL__
>>  
>> +#include <asm/alternative.h>
>> +#include <asm/cpufeature.h>
>>  #include <asm/ptrace.h>
>> +#include <asm/sysreg.h>
>> +
>> +
>> +/*
>> + * When ICC_PMR_EL1 is used for interrupt masking, only the bit indicating
>> + * whether the normal interrupts are masked is kept along with the daif
>> + * flags.
>> + */
>> +#define ARCH_FLAG_PMR_EN 0x1
>> +
>> +#define MAKE_ARCH_FLAGS(daif, pmr)					\
>> +	((daif) | (((pmr) >> GIC_PRIO_STATUS_SHIFT) & ARCH_FLAG_PMR_EN))
>> +
>> +#define ARCH_FLAGS_GET_PMR(flags)				 \
>> +	((((flags) & ARCH_FLAG_PMR_EN) << GIC_PRIO_STATUS_SHIFT) \
>> +		| GIC_PRIO_IRQOFF)
>> +
>> +#define ARCH_FLAGS_GET_DAIF(flags) ((flags) & ~ARCH_FLAG_PMR_EN)
> 
> I wonder whether we could just use the PSR_I_BIT here to decide whether
> to set the GIC_PRIO_IRQ{ON,OFF}. We could clear the PSR_I_BIT in
> _restore_daif() with an alternative.
> 

So, the issue with it is that some contexts might be using PSR.I to
disable interrupts (any contexts with async errors or debug exceptions
disabled, kvm guest entry paths, pseudo-NMIs, ...).

If any of these contexts calls local_irq_save()/local_irq_restore() or
local_daif_save()/local_daif_restore(), by only relying on PSR_I_BIT to
represent the PMR status, we might end up clearing PSR.I when we shouldn't.

I'm not sure that there are no callers of these functions in those
contexts. But if there aren't any, we could simplify things, yes.

Thanks,


>> +/*
>> + * CPU interrupt mask handling.
>> + */
>>  static inline void arch_local_irq_enable(void)
>>  {
>> -	asm volatile(
>> -		"msr	daifclr, #2		// arch_local_irq_enable"
>> -		:
>> +	unsigned long unmasked = GIC_PRIO_IRQON;
>> +
>> +	asm volatile(ALTERNATIVE(
>> +		"msr	daifclr, #2		// arch_local_irq_enable\n"
>> +		"nop",
>> +		"msr_s  " __stringify(SYS_ICC_PMR_EL1) ",%0\n"
>> +		"dsb	sy",
>> +		ARM64_HAS_IRQ_PRIO_MASKING)
> 
> DSB needed here as well? I guess I'd have to read the GIC spec before
> asking again ;).
>
Catalin Marinas Dec. 5, 2018, 6:26 p.m. UTC | #3
On Wed, Dec 05, 2018 at 04:55:54PM +0000, Julien Thierry wrote:
> On 04/12/18 17:36, Catalin Marinas wrote:
> > On Mon, Nov 12, 2018 at 11:57:01AM +0000, Julien Thierry wrote:
> >> diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
> >> index 24692ed..e0a32e4 100644
> >> --- a/arch/arm64/include/asm/irqflags.h
> >> +++ b/arch/arm64/include/asm/irqflags.h
> >> @@ -18,7 +18,27 @@
> >>  
> >>  #ifdef __KERNEL__
> >>  
> >> +#include <asm/alternative.h>
> >> +#include <asm/cpufeature.h>
> >>  #include <asm/ptrace.h>
> >> +#include <asm/sysreg.h>
> >> +
> >> +
> >> +/*
> >> + * When ICC_PMR_EL1 is used for interrupt masking, only the bit indicating
> >> + * whether the normal interrupts are masked is kept along with the daif
> >> + * flags.
> >> + */
> >> +#define ARCH_FLAG_PMR_EN 0x1
> >> +
> >> +#define MAKE_ARCH_FLAGS(daif, pmr)					\
> >> +	((daif) | (((pmr) >> GIC_PRIO_STATUS_SHIFT) & ARCH_FLAG_PMR_EN))
> >> +
> >> +#define ARCH_FLAGS_GET_PMR(flags)				 \
> >> +	((((flags) & ARCH_FLAG_PMR_EN) << GIC_PRIO_STATUS_SHIFT) \
> >> +		| GIC_PRIO_IRQOFF)
> >> +
> >> +#define ARCH_FLAGS_GET_DAIF(flags) ((flags) & ~ARCH_FLAG_PMR_EN)
> > 
> > I wonder whether we could just use the PSR_I_BIT here to decide whether
> > to set the GIC_PRIO_IRQ{ON,OFF}. We could clear the PSR_I_BIT in
> > _restore_daif() with an alternative.
> 
> So, the issue with it is that some contexts might be using PSR.I to
> disable interrupts (any contexts with async errors or debug exceptions
> disabled, kvm guest entry paths, pseudo-NMIs, ...).
> 
> If any of these contexts calls local_irq_save()/local_irq_restore() or
> local_daif_save()/local_daif_restore(), by only relying on PSR_I_BIT to
> represent the PMR status, we might end up clearing PSR.I when we shouldn't.
> 
> I'm not sure that there are no callers of these functions in those
> contexts. But if there aren't any, we could simplify things, yes.

There are callers of local_daif_save() (3) and local_daif_mask() (7) but
do they all need to disable the pseudo-NMIs?

At a brief look at x86, it seems that they have something like
stop_nmi() and restart_nmi(). These don't have save/restore semantics,
so we could do something similar on arm64 that only deals with the
PSTATE.I bit directly and keep the software (flags) PSR.I as the PMR
bit. But we'd have to go through the 10 local_daif_* cases above to see
which actually need the stop_nmi() semantics.
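
For illustration, minimal arm64 counterparts to such helpers might look like
the following (names and semantics are hypothetical, mirroring the x86
analogy above): they deal purely with PSTATE.I, which masks pseudo-NMIs once
PMR handles normal IRQ masking, and deliberately have no save/restore
semantics.

/*
 * Hypothetical stop_nmi()/restart_nmi() for arm64 -- sketch only.
 */
static inline void stop_nmi(void)
{
	asm volatile("msr	daifset, #2" : : : "memory");
}

static inline void restart_nmi(void)
{
	asm volatile("msr	daifclr, #2" : : : "memory");
}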
Julien Thierry Dec. 6, 2018, 9:50 a.m. UTC | #4
On 05/12/18 18:26, Catalin Marinas wrote:
> On Wed, Dec 05, 2018 at 04:55:54PM +0000, Julien Thierry wrote:
>> On 04/12/18 17:36, Catalin Marinas wrote:
>>> On Mon, Nov 12, 2018 at 11:57:01AM +0000, Julien Thierry wrote:
>>>> diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
>>>> index 24692ed..e0a32e4 100644
>>>> --- a/arch/arm64/include/asm/irqflags.h
>>>> +++ b/arch/arm64/include/asm/irqflags.h
>>>> @@ -18,7 +18,27 @@
>>>>  
>>>>  #ifdef __KERNEL__
>>>>  
>>>> +#include <asm/alternative.h>
>>>> +#include <asm/cpufeature.h>
>>>>  #include <asm/ptrace.h>
>>>> +#include <asm/sysreg.h>
>>>> +
>>>> +
>>>> +/*
>>>> + * When ICC_PMR_EL1 is used for interrupt masking, only the bit indicating
>>>> + * whether the normal interrupts are masked is kept along with the daif
>>>> + * flags.
>>>> + */
>>>> +#define ARCH_FLAG_PMR_EN 0x1
>>>> +
>>>> +#define MAKE_ARCH_FLAGS(daif, pmr)					\
>>>> +	((daif) | (((pmr) >> GIC_PRIO_STATUS_SHIFT) & ARCH_FLAG_PMR_EN))
>>>> +
>>>> +#define ARCH_FLAGS_GET_PMR(flags)				 \
>>>> +	((((flags) & ARCH_FLAG_PMR_EN) << GIC_PRIO_STATUS_SHIFT) \
>>>> +		| GIC_PRIO_IRQOFF)
>>>> +
>>>> +#define ARCH_FLAGS_GET_DAIF(flags) ((flags) & ~ARCH_FLAG_PMR_EN)
>>>
>>> I wonder whether we could just use the PSR_I_BIT here to decide whether
>>> to set the GIC_PRIO_IRQ{ON,OFF}. We could clear the PSR_I_BIT in
>>> _restore_daif() with an alternative.
>>
>> So, the issue with it is that some contexts might be using PSR.I to
>> disable interrupts (any contexts with async errors or debug exceptions
>> disabled, kvm guest entry paths, pseudo-NMIs, ...).
>>
>> If any of these contexts calls local_irq_save()/local_irq_restore() or
>> local_daif_save()/local_daif_restore(), by only relying on PSR_I_BIT to
>> represent the PMR status, we might end up clearing PSR.I when we shouldn't.
>>
>> I'm not sure that there are no callers of these functions in those
>> contexts. But if there aren't any, we could simplify things, yes.
> 
> There are callers of local_daif_save() (3) and local_daif_mask() (7) but
> do they all need to disable the pseudo-NMIs?
> 

Hmmm, I really think that both of those should be disabling NMIs.
Otherwise, if we take an NMI, the first thing the el1_irq handler is
going to do is "enable_da_f()" which could lead to potential issues.

One thing that could be done is:
- local_daif_save() and local_daif_mask() both mask all daif bits
(taking care to represent PMR value in the I bit of the saved flags)
- local_daif_restore() restores da_f as expected and decides values to
put for PMR and PSR.I as follows:
	* do the da_f restore
	* if PSR.A bit is cleared in the saved flags, then we also do a start_nmi()

However, this would not work with a local_daif_save()/restore() on the
return path of an NMI because I think it is the only context with NMIs
"stopped" that can take aborts. I can add a WARN_ON(in_nmi()) for
local_daif_restore() if that doesn't affect performance too much.

Does that sound alright?
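
A very rough sketch of that proposal, for illustration only: the saved flags
are assumed to record the PMR state in PSR.I as described, and the
start_nmi()-like step is written directly as a daifclr on PSTATE.I.

/*
 * Illustrative sketch of the proposed restore semantics.
 */
static inline void local_daif_restore_sketch(unsigned long flags)
{
	unsigned long cur;

	/* Proposed debug check: the NMI return path cannot use this. */
	WARN_ON(in_nmi());

	/* "da_f" restore: write back D, A and F, leave PSR.I alone. */
	asm volatile("mrs	%0, daif" : "=r" (cur));
	cur = (cur & PSR_I_BIT) |
	      (flags & (PSR_D_BIT | PSR_A_BIT | PSR_F_BIT));
	asm volatile("msr	daif, %0" : : "r" (cur) : "memory");

	/*
	 * If asynchronous errors were unmasked in the saved flags, the
	 * saved context was not an NMI-style one, so pseudo-NMIs can be
	 * re-enabled (the start_nmi() step).
	 */
	if (!(flags & PSR_A_BIT))
		asm volatile("msr	daifclr, #2" : : : "memory");
}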

> At a brief look at x86, it seems that they have something like
> stop_nmi() and restart_nmi(). These don't have save/restore semantics,
> so we could do something similar on arm64 that only deals with the
> PSTATE.I bit directly and keep the software (flags) PSR.I as the PMR
> bit. But we'd have to go through the 10 local_daif_* cases above to see
> which actually need the stop_nmi() semantics.
> 

Yes, having those could be useful to deal with the above and maybe some
other places.

Thanks,
Catalin Marinas Dec. 10, 2018, 2:39 p.m. UTC | #5
On Thu, Dec 06, 2018 at 09:50:18AM +0000, Julien Thierry wrote:
> On 05/12/18 18:26, Catalin Marinas wrote:
> > On Wed, Dec 05, 2018 at 04:55:54PM +0000, Julien Thierry wrote:
> >> On 04/12/18 17:36, Catalin Marinas wrote:
> >>> On Mon, Nov 12, 2018 at 11:57:01AM +0000, Julien Thierry wrote:
> >>>> diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
> >>>> index 24692ed..e0a32e4 100644
> >>>> --- a/arch/arm64/include/asm/irqflags.h
> >>>> +++ b/arch/arm64/include/asm/irqflags.h
> >>>> @@ -18,7 +18,27 @@
> >>>>  
> >>>>  #ifdef __KERNEL__
> >>>>  
> >>>> +#include <asm/alternative.h>
> >>>> +#include <asm/cpufeature.h>
> >>>>  #include <asm/ptrace.h>
> >>>> +#include <asm/sysreg.h>
> >>>> +
> >>>> +
> >>>> +/*
> >>>> + * When ICC_PMR_EL1 is used for interrupt masking, only the bit indicating
> >>>> + * whether the normal interrupts are masked is kept along with the daif
> >>>> + * flags.
> >>>> + */
> >>>> +#define ARCH_FLAG_PMR_EN 0x1
> >>>> +
> >>>> +#define MAKE_ARCH_FLAGS(daif, pmr)					\
> >>>> +	((daif) | (((pmr) >> GIC_PRIO_STATUS_SHIFT) & ARCH_FLAG_PMR_EN))
> >>>> +
> >>>> +#define ARCH_FLAGS_GET_PMR(flags)				 \
> >>>> +	((((flags) & ARCH_FLAG_PMR_EN) << GIC_PRIO_STATUS_SHIFT) \
> >>>> +		| GIC_PRIO_IRQOFF)
> >>>> +
> >>>> +#define ARCH_FLAGS_GET_DAIF(flags) ((flags) & ~ARCH_FLAG_PMR_EN)
> >>>
> >>> I wonder whether we could just use the PSR_I_BIT here to decide whether
> >>> to set the GIC_PRIO_IRQ{ON,OFF}. We could clear the PSR_I_BIT in
> >>> _restore_daif() with an alternative.
> >>
> >> So, the issue with it is that some contexts might be using PSR.I to
> >> disable interrupts (any contexts with async errors or debug exceptions
> >> disabled, kvm guest entry paths, pseudo-NMIs, ...).
> >>
> >> If any of these contexts calls local_irq_save()/local_irq_restore() or
> >> local_daif_save()/local_daif_restore(), by only relying on PSR_I_BIT to
> >> represent the PMR status, we might end up clearing PSR.I when we shouldn't.
> >>
> >> I'm not sure that there are no callers of these functions in those
> >> contexts. But if there aren't any, we could simplify things, yes.
> > 
> > There are callers of local_daif_save() (3) and local_daif_mask() (7) but
> > do they all need to disable the pseudo-NMIs?
> 
> Hmmm, I really think that both of those should be disabling NMIs.
> Otherwise, if we take an NMI, the first thing the el1_irq handler is
> going to do is "enable_da_f()" which could lead to potential issues.
> 
> One thing that could be done is:
> - local_daif_save() and local_daif_mask() both mask all daif bits
> (taking care to represent PMR value in the I bit of the saved flags)
> - local_daif_restore() restores da_f as expected and decides values to
> put for PMR and PSR.I as follows:
> 	* do the da_f restore
> 	* if PSR.A bit is cleared in the saved flags, then we also do a start_nmi()
> 
> However, this would not work with a local_daif_save()/restore() on the
> return path of an NMI because I think it is the only context with NMIs
> "stopped" that can take aborts. I can add a WARN_ON(in_nmi()) for
> local_daif_restore() if that doesn't affect performance too much.

FTR, as we discussed this in the office, the conclusion (IIUC) we got to
was: leave the *_daif_*() functions unchanged, touching all the
corresponding PSTATE bits, but change the arch_local_irq_*() macros to
only touch the PMR when the feature is enabled.

Patch

diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 7ed3208..3e06891 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -42,7 +42,8 @@ 
 
 efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
 
-#define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
+#define ARCH_EFI_IRQ_FLAGS_MASK \
+	(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | ARCH_FLAG_PMR_EN)
 
 /* arch specific definitions used by the stub code */
 
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 24692ed..e0a32e4 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -18,7 +18,27 @@ 
 
 #ifdef __KERNEL__
 
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
 #include <asm/ptrace.h>
+#include <asm/sysreg.h>
+
+
+/*
+ * When ICC_PMR_EL1 is used for interrupt masking, only the bit indicating
+ * whether the normal interrupts are masked is kept along with the daif
+ * flags.
+ */
+#define ARCH_FLAG_PMR_EN 0x1
+
+#define MAKE_ARCH_FLAGS(daif, pmr)					\
+	((daif) | (((pmr) >> GIC_PRIO_STATUS_SHIFT) & ARCH_FLAG_PMR_EN))
+
+#define ARCH_FLAGS_GET_PMR(flags)				 \
+	((((flags) & ARCH_FLAG_PMR_EN) << GIC_PRIO_STATUS_SHIFT) \
+		| GIC_PRIO_IRQOFF)
+
+#define ARCH_FLAGS_GET_DAIF(flags) ((flags) & ~ARCH_FLAG_PMR_EN)
 
 /*
  * Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
@@ -34,35 +54,62 @@ 
  */
 
 /*
- * CPU interrupt mask handling.
+ * Local definitions to help us manage between PMR and daif
  */
-static inline unsigned long arch_local_irq_save(void)
-{
-	unsigned long flags;
-	asm volatile(
-		"mrs	%0, daif		// arch_local_irq_save\n"
-		"msr	daifset, #2"
-		: "=r" (flags)
-		:
-		: "memory");
-	return flags;
-}
+#define _save_daif(dest)						\
+	asm volatile("mrs	%0, daif" : "=&r" (dest) : : "memory")
+
+#define _restore_daif(daif)						\
+	asm volatile("msr	daif, %0" : : "r" (daif) : "memory")
 
+#define _save_pmr(dest)							\
+	asm volatile(ALTERNATIVE(					\
+		     "mov	%0, #" __stringify(GIC_PRIO_IRQON),	\
+		     "mrs_s	%0, " __stringify(SYS_ICC_PMR_EL1),	\
+		     ARM64_HAS_IRQ_PRIO_MASKING)			\
+		: "=&r" (dest)						\
+		:							\
+		: "memory")
+
+#define _restore_pmr(pmr)						\
+	asm volatile(ALTERNATIVE(					\
+		     "nop\n"						\
+		     "nop",						\
+		     "msr_s  " __stringify(SYS_ICC_PMR_EL1) ",%0\n"	\
+		     "dsb	sy",					\
+		     ARM64_HAS_IRQ_PRIO_MASKING)			\
+		:							\
+		: "r" (pmr)						\
+		: "memory")
+
+/*
+ * CPU interrupt mask handling.
+ */
 static inline void arch_local_irq_enable(void)
 {
-	asm volatile(
-		"msr	daifclr, #2		// arch_local_irq_enable"
-		:
+	unsigned long unmasked = GIC_PRIO_IRQON;
+
+	asm volatile(ALTERNATIVE(
+		"msr	daifclr, #2		// arch_local_irq_enable\n"
+		"nop",
+		"msr_s  " __stringify(SYS_ICC_PMR_EL1) ",%0\n"
+		"dsb	sy",
+		ARM64_HAS_IRQ_PRIO_MASKING)
 		:
+		: "r" (unmasked)
 		: "memory");
 }
 
 static inline void arch_local_irq_disable(void)
 {
-	asm volatile(
-		"msr	daifset, #2		// arch_local_irq_disable"
-		:
+	unsigned long masked = GIC_PRIO_IRQOFF;
+
+	asm volatile(ALTERNATIVE(
+		"msr	daifset, #2		// arch_local_irq_disable",
+		"msr_s  " __stringify(SYS_ICC_PMR_EL1) ",%0",
+		ARM64_HAS_IRQ_PRIO_MASKING)
 		:
+		: "r" (masked)
 		: "memory");
 }
 
@@ -71,12 +118,24 @@  static inline void arch_local_irq_disable(void)
  */
 static inline unsigned long arch_local_save_flags(void)
 {
+	unsigned long daif_flags;
+	unsigned long pmr;
+
+	_save_daif(daif_flags);
+
+	_save_pmr(pmr);
+
+	return MAKE_ARCH_FLAGS(daif_flags, pmr);
+}
+
+static inline unsigned long arch_local_irq_save(void)
+{
 	unsigned long flags;
-	asm volatile(
-		"mrs	%0, daif		// arch_local_save_flags"
-		: "=r" (flags)
-		:
-		: "memory");
+
+	flags = arch_local_save_flags();
+
+	arch_local_irq_disable();
+
 	return flags;
 }
 
@@ -85,16 +144,31 @@  static inline unsigned long arch_local_save_flags(void)
  */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-	asm volatile(
-		"msr	daif, %0		// arch_local_irq_restore"
-	:
-	: "r" (flags)
-	: "memory");
+	unsigned long pmr = ARCH_FLAGS_GET_PMR(flags);
+
+	flags = ARCH_FLAGS_GET_DAIF(flags);
+
+	/*
+	 * Code switching from PSR.I interrupt disabling to PMR masking
+	 * should not lie between consecutive calls to local_irq_save()
+	 * and local_irq_restore() in the same context.
+	 * So restoring PMR and then the daif flags should be safe.
+	 */
+	_restore_pmr(pmr);
+
+	_restore_daif(flags);
 }
 
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-	return flags & PSR_I_BIT;
+	return (ARCH_FLAGS_GET_DAIF(flags) & (PSR_I_BIT)) |
+		!(ARCH_FLAGS_GET_PMR(flags) & GIC_PRIO_STATUS_BIT);
 }
+
+#undef _save_daif
+#undef _restore_daif
+#undef _save_pmr
+#undef _restore_pmr
+
 #endif
 #endif