@@ -163,7 +163,9 @@ static inline bool gic_prio_masking_enabled(void)
static inline void gic_pmr_mask_irqs(void)
{
- BUILD_BUG_ON(GICD_INT_DEF_PRI <= GIC_PRIO_IRQOFF);
+ BUILD_BUG_ON(GICD_INT_DEF_PRI < (GIC_PRIO_IRQOFF |
+ GIC_PRIO_WAIT_GIC_BIT));
+ BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON);
gic_write_pmr(GIC_PRIO_IRQOFF);
}
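
For reference, both build-time checks above hold with the values introduced in the ptrace.h hunk further down. A standalone sketch of the arithmetic, assuming GICD_INT_DEF_PRI is 0xa0 as in the common GIC headers (illustrative only, not part of the patch):

#include <assert.h>

#define GICD_INT_DEF_PRI		0xa0	/* assumed GIC default priority */
#define GIC_PRIO_IRQON			0xc0
#define GIC_PRIO_IRQOFF			(GIC_PRIO_IRQON & ~0x80)	/* 0x40 */
#define GIC_PRIO_SUSPEND_MASKING	(GIC_PRIO_IRQON | 0x20)		/* 0xe0 */
#define GIC_PRIO_WAIT_GIC_BIT		(1 << 4)

int main(void)
{
	/*
	 * PMR == GIC_PRIO_IRQOFF, even with the wait bit set, still masks
	 * default-priority interrupts (priorities >= PMR are masked).
	 */
	assert(GICD_INT_DEF_PRI >= (GIC_PRIO_IRQOFF | GIC_PRIO_WAIT_GIC_BIT));

	/* PMR == GIC_PRIO_IRQON lets default-priority interrupts through. */
	assert(GICD_INT_DEF_PRI < GIC_PRIO_IRQON);

	return 0;
}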
@@ -62,6 +62,24 @@
msr daifclr, #(8 | 4 | 1)
.endm
+ .macro suspend_irq_prio_masking, tmp:req
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ mov \tmp, #GIC_PRIO_SUSPEND_MASKING
+ msr_s SYS_ICC_PMR_EL1, \tmp
+ alternative_else_nop_endif
+#endif
+ .endm
+
+ .macro set_prio_wait_gic, curr_pmr:req, tmp:req
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+ orr \tmp, \curr_pmr, #GIC_PRIO_WAIT_GIC_BIT
+ msr_s SYS_ICC_PMR_EL1, \tmp
+ alternative_else_nop_endif
+#endif
+ .endm
+
/*
* Save/restore interrupts.
*/
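
For readers less familiar with the alternative_if machinery, here is a rough C-level equivalent of the two macros above (a sketch only; the real users are assembly paths in entry.S, and the double-underscore names exist only for this illustration):

#include <asm/arch_gicv3.h>
#include <asm/cpufeature.h>
#include <asm/ptrace.h>

/* Like suspend_irq_prio_masking: park PMR at a value that does not mask
 * IRQs at the GIC level, relying on PSR.I for masking instead. */
static inline void __suspend_irq_prio_masking(void)
{
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_SUSPEND_MASKING);
}

/* Like set_prio_wait_gic: record that the GIC driver must act (acknowledge
 * the pending interrupt) before PMR may be modified again. */
static inline void __set_prio_wait_gic(void)
{
	if (system_uses_irq_prio_masking())
		gic_write_pmr(gic_read_pmr() | GIC_PRIO_WAIT_GIC_BIT);
}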
@@ -18,6 +18,7 @@
#include <linux/irqflags.h>
+#include <asm/arch_gicv3.h>
#include <asm/cpufeature.h>
#define DAIF_PROCCTX 0
@@ -32,6 +33,11 @@ static inline void local_daif_mask(void)
:
:
: "memory");
+
+ /* Don't really care for a dsb here, we don't intend to enable IRQs */
+ if (system_uses_irq_prio_masking())
+ gic_write_pmr(GIC_PRIO_SUSPEND_MASKING);
+
trace_hardirqs_off();
}
@@ -43,7 +49,7 @@ static inline unsigned long local_daif_save(void)
if (system_uses_irq_prio_masking()) {
/* If IRQs are masked with PMR, reflect it in the flags */
- if (read_sysreg_s(SYS_ICC_PMR_EL1) <= GIC_PRIO_IRQOFF)
+ if (read_sysreg_s(SYS_ICC_PMR_EL1) != GIC_PRIO_IRQON)
flags |= PSR_I_BIT;
}
@@ -59,8 +65,10 @@ static inline void local_daif_restore(unsigned long flags)
if (!irq_disabled) {
trace_hardirqs_on();
- if (system_uses_irq_prio_masking())
- arch_local_irq_enable();
+ if (system_uses_irq_prio_masking()) {
+ gic_write_pmr(GIC_PRIO_IRQON);
+ dsb(sy);
+ }
} else if (!(flags & PSR_A_BIT)) {
/*
* If interrupts are disabled but we can take
@@ -87,7 +95,7 @@ static inline void local_daif_restore(unsigned long flags)
*
* So we don't need additional synchronization here.
*/
- arch_local_irq_disable();
+ gic_write_pmr(GIC_PRIO_IRQOFF);
}
}
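
As a usage sketch of the resulting semantics (the function name below is made up for illustration):

#include <asm/daifflags.h>

static void example_quiesce(void)
{
	unsigned long flags;

	/*
	 * With priority masking enabled, any PMR value other than
	 * GIC_PRIO_IRQON is folded into PSR_I_BIT, so this pair behaves the
	 * same whether IRQs were masked via PMR or via PSR.I on entry.
	 */
	flags = local_daif_save();

	/* All DAIF exceptions masked; PMR is not used for masking here. */

	local_daif_restore(flags);	/* IRQs were on: PMR = GIC_PRIO_IRQON, then dsb(sy) */
					/* IRQs off, aborts on: PMR = GIC_PRIO_IRQOFF       */
}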
@@ -67,31 +67,14 @@ static inline void arch_local_irq_disable(void)
*/
static inline unsigned long arch_local_save_flags(void)
{
- unsigned long daif_bits;
unsigned long flags;
- daif_bits = read_sysreg(daif);
-
- /*
- * The asm is logically equivalent to:
- *
- * if (system_uses_irq_prio_masking())
- * flags = (daif_bits & PSR_I_BIT) ?
- * GIC_PRIO_IRQOFF :
- * read_sysreg_s(SYS_ICC_PMR_EL1);
- * else
- * flags = daif_bits;
- */
asm volatile(ALTERNATIVE(
- "mov %0, %1\n"
- "nop\n"
- "nop",
- "mrs_s %0, " __stringify(SYS_ICC_PMR_EL1) "\n"
- "ands %1, %1, " __stringify(PSR_I_BIT) "\n"
- "csel %0, %0, %2, eq",
- ARM64_HAS_IRQ_PRIO_MASKING)
- : "=&r" (flags), "+r" (daif_bits)
- : "r" ((unsigned long) GIC_PRIO_IRQOFF)
+ "mrs %0, daif",
+ "mrs_s %0, " __stringify(SYS_ICC_PMR_EL1) "\n",
+ ARM64_HAS_IRQ_PRIO_MASKING)
+ : "=&r" (flags)
+ :
: "memory");
return flags;
@@ -119,8 +102,8 @@ static inline void arch_local_irq_restore(unsigned long flags)
"msr_s " __stringify(SYS_ICC_PMR_EL1) ", %0\n"
"dsb sy",
ARM64_HAS_IRQ_PRIO_MASKING)
- : "+r" (flags)
:
+ : "r" (flags)
: "memory");
}
@@ -129,11 +112,11 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
int res;
asm volatile(ALTERNATIVE(
- "and %w0, %w1, #" __stringify(PSR_I_BIT) "\n"
- "nop",
- "cmp %w1, #" __stringify(GIC_PRIO_IRQOFF) "\n"
- "cset %w0, ls",
- ARM64_HAS_IRQ_PRIO_MASKING)
+ "and %w0, %w1, #" __stringify(PSR_I_BIT) "\n"
+ "nop",
+ "cmp %w1, #" __stringify(GIC_PRIO_IRQON) "\n"
+ "cset %w0, ne",
+ ARM64_HAS_IRQ_PRIO_MASKING)
: "=&r" (res)
: "r" ((int) flags)
: "memory");
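
Since the old "logically equivalent to" comment is dropped, here is what the reworked alternatives amount to in C (a sketch; the _equiv names exist only for this illustration):

#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>

static inline unsigned long arch_local_save_flags_equiv(void)
{
	/* The saved flags are now simply the raw PMR value. */
	if (system_uses_irq_prio_masking())
		return read_sysreg_s(SYS_ICC_PMR_EL1);
	return read_sysreg(daif);
}

static inline int arch_irqs_disabled_flags_equiv(unsigned long flags)
{
	/* Any PMR value other than GIC_PRIO_IRQON counts as "IRQs disabled". */
	if (system_uses_irq_prio_masking())
		return flags != GIC_PRIO_IRQON;
	return flags & PSR_I_BIT;
}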
@@ -532,9 +532,11 @@ static inline void kvm_arm_vhe_guest_enter(void)
* will not signal the CPU of interrupts of lower priority, and the
* only way to get out will be via guest exceptions.
* Naturally, we want to avoid this.
+ *
+ * local_daif_mask() already sets PMR to GIC_PRIO_SUSPEND_MASKING; we just
+ * need a dsb to ensure the redistributor forwards EL2 IRQs to the CPU.
*/
if (system_uses_irq_prio_masking()) {
- gic_write_pmr(GIC_PRIO_IRQON);
dsb(sy);
}
}
@@ -35,9 +35,29 @@
* means masking more IRQs (or at least that the same IRQs remain masked).
*
* To mask interrupts, we clear the most significant bit of PMR.
+ *
+ * Some code sections either automatically switch back to PSR.I or explicitly
+ * require not to use priority masking. GIC_PRIO_SUSPEND_MASKING is the PMR
+ * value intended for such sections: interrupts are disabled there, but not
+ * through priority masking.
+ *
+ * Another special case is the path from taking an interrupt to acknowledging
+ * it. The interrupt needs to be unmasked in order to be acknowledged, but
+ * other interrupts mustn't get unmasked at that point, as the signaled
+ * interrupt could have been spurious.
+ *
+ * Overall, the layout used for PMR is:
+ * - State[7:5]:
+ * 0x7 => Suspended, 0x6 => IRQs on, 0x2 => IRQs off, other => Undefined
+ *
+ * - Wait GIC bit[4]:
+ * 0x1 => Action from GIC driver is needed before modifying PMR,
+ * 0x0 => no action required
*/
-#define GIC_PRIO_IRQON 0xf0
-#define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80)
+#define GIC_PRIO_IRQON 0xc0
+#define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80)
+#define GIC_PRIO_SUSPEND_MASKING (GIC_PRIO_IRQON | 0x20)
+#define GIC_PRIO_WAIT_GIC_BIT (1 << 4)
/* Additional SPSR bits not exposed in the UABI */
#define PSR_IL_BIT (1 << 20)
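
Spelled out numerically against the layout documented above (illustrative only; the two decode macros are hypothetical and not part of the patch):

/*
 * GIC_PRIO_IRQON                          = 0xc0 -> State = 0x6, wait bit clear
 * GIC_PRIO_IRQOFF                         = 0x40 -> State = 0x2, wait bit clear
 * GIC_PRIO_SUSPEND_MASKING                = 0xe0 -> State = 0x7, wait bit clear
 * GIC_PRIO_IRQOFF | GIC_PRIO_WAIT_GIC_BIT = 0x50 -> State = 0x2, wait bit set
 */
#define GIC_PRIO_STATE(pmr)		(((pmr) >> 5) & 0x7)
#define GIC_PRIO_NEEDS_GIC_ACTION(pmr)	(!!((pmr) & GIC_PRIO_WAIT_GIC_BIT))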
@@ -258,6 +258,7 @@ alternative_else_nop_endif
/*
* Registers that may be useful after this macro is invoked:
*
+ * x20 - ICC_PMR_EL1
* x21 - aborted SP
* x22 - aborted PC
* x23 - aborted PSTATE
@@ -598,6 +599,7 @@ el1_dbg:
cmp x24, #ESR_ELx_EC_BRK64 // if BRK64
cinc x24, x24, eq // set bit '0'
tbz x24, #0, el1_inv // EL1 only
+ suspend_irq_prio_masking tmp=x21
mrs x0, far_el1
mov x2, sp // struct pt_regs
bl do_debug_exception
@@ -615,7 +617,9 @@ ENDPROC(el1_sync)
.align 6
el1_irq:
kernel_entry 1
+ set_prio_wait_gic curr_pmr=x20, tmp=x0
enable_da_f
+
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
@@ -644,11 +648,12 @@ alternative_else
mov x20, #GIC_PRIO_IRQON
alternative_endif
/*
- * if IRQs were disabled when we received the interrupt, we have an NMI
- * and we are not re-enabling interrupt upon eret. Skip tracing.
+ * When using IRQ priority masking, we can get spurious interrupts while
+ * PMR is set to GIC_PRIO_IRQOFF. An NMI might also have occurred in a
+ * section with interrupts disabled. Skip tracing in those cases.
*/
- cmp x20, #GIC_PRIO_IRQOFF
- b.ls 1f
+ cmp x20, #GIC_PRIO_IRQON
+ b.ne 1f
#endif
bl trace_hardirqs_on
1:
@@ -766,6 +771,7 @@ el0_ia:
* Instruction abort handling
*/
mrs x26, far_el1
+ suspend_irq_prio_masking tmp=x0
enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
@@ -811,6 +817,7 @@ el0_sp_pc:
* Stack or PC alignment exception handling
*/
mrs x26, far_el1
+ suspend_irq_prio_masking tmp=x0
enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
@@ -845,6 +852,7 @@ el0_dbg:
* Debug exception handling
*/
tbnz x24, #0, el0_inv // EL0 only
+ suspend_irq_prio_masking tmp=x0
mrs x0, far_el1
mov x1, x25
mov x2, sp
@@ -866,7 +874,9 @@ ENDPROC(el0_sync)
el0_irq:
kernel_entry 0
el0_irq_naked:
+ set_prio_wait_gic curr_pmr=x20, tmp=x0
enable_da_f
+
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
@@ -888,6 +898,7 @@ ENDPROC(el0_irq)
el1_error:
kernel_entry 1
mrs x1, esr_el1
+ suspend_irq_prio_masking tmp=x0
enable_dbg
mov x0, sp
bl do_serror
@@ -898,6 +909,7 @@ el0_error:
kernel_entry 0
el0_error_naked:
mrs x1, esr_el1
+ suspend_irq_prio_masking tmp=x0
enable_dbg
mov x0, sp
bl do_serror
@@ -922,6 +934,7 @@ work_pending:
*/
ret_to_user:
disable_daif
+ suspend_irq_prio_masking tmp=x0
ldr x1, [tsk, #TSK_TI_FLAGS]
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
@@ -938,6 +951,7 @@ ENDPROC(ret_to_user)
*/
.align 6
el0_svc:
+ suspend_irq_prio_masking tmp=x1
mov x0, sp
bl el0_svc_handler
b ret_to_user
@@ -94,7 +94,7 @@ static void __cpu_do_idle_irqprio(void)
* be raised.
*/
pmr = gic_read_pmr();
- gic_write_pmr(GIC_PRIO_IRQON);
+ gic_write_pmr(GIC_PRIO_SUSPEND_MASKING);
__cpu_do_idle();
@@ -192,11 +192,13 @@ static void init_gic_priority_masking(void)
WARN_ON(!(cpuflags & PSR_I_BIT));
- gic_write_pmr(GIC_PRIO_IRQOFF);
-
/* We can only unmask PSR.I if we can take aborts */
- if (!(cpuflags & PSR_A_BIT))
+ if (!(cpuflags & PSR_A_BIT)) {
+ gic_write_pmr(GIC_PRIO_IRQOFF);
write_sysreg(cpuflags & ~PSR_I_BIT, daif);
+ } else {
+ gic_write_pmr(GIC_PRIO_SUSPEND_MASKING);
+ }
}
/*
@@ -533,7 +533,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
* Naturally, we want to avoid this.
*/
if (system_uses_irq_prio_masking()) {
- gic_write_pmr(GIC_PRIO_IRQON);
+ gic_write_pmr(GIC_PRIO_SUSPEND_MASKING);
dsb(sy);
}
When using IRQ priority masking to disable interrupts, in order to deal
with the PSR.I state, local_irq_save() would convert the I bit into a PMR
value (GIC_PRIO_IRQOFF). This resulted in local_irq_restore() potentially
modifying the value of PMR in undesired locations due to the state of
PSR.I upon flag saving [1].

In an attempt to solve this issue in a less hackish manner, introduce new
PMR values to represent possible states where interrupts are disabled:

- GIC_PRIO_SUSPEND_MASKING represents sections of code where interrupt
  disabling relies on PSR.I instead of PMR. The value masks fewer IRQs
  than GIC_PRIO_IRQON in order to make sure IRQs are signaled to the CPU
  (but not taken).

- GIC_PRIO_WAIT_GIC_BIT is used to indicate that an action from the GIC is
  pending before the interrupt status can be toggled. While this bit is
  set, PSR.I should also be set.

[1] https://www.spinics.net/lists/arm-kernel/msg716956.html

Fixes: 4a503217ce37 ("arm64: irqflags: Use ICC_PMR_EL1 for interrupt masking")
Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Reported-by: Zenghui Yu <yuzenghui@huawei.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Wei Li <liwei391@huawei.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Christoffer Dall <christoffer.dall@arm.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
---
 arch/arm64/include/asm/arch_gicv3.h |  4 +++-
 arch/arm64/include/asm/assembler.h  | 18 +++++++++++++++++
 arch/arm64/include/asm/daifflags.h  | 16 +++++++++++----
 arch/arm64/include/asm/irqflags.h   | 39 +++++++++++--------------------------
 arch/arm64/include/asm/kvm_host.h   |  4 +++-
 arch/arm64/include/asm/ptrace.h     | 24 +++++++++++++++++++++--
 arch/arm64/kernel/entry.S           | 22 +++++++++++++++++----
 arch/arm64/kernel/process.c         |  2 +-
 arch/arm64/kernel/smp.c             |  8 +++++---
 arch/arm64/kvm/hyp/switch.c         |  2 +-
 10 files changed, 94 insertions(+), 45 deletions(-)

--
1.9.1