@@ -840,6 +840,21 @@ config FORCE_MAX_ZONEORDER
However for 4K, we choose a higher default value, 11 as opposed to 10, giving us
4M allocations matching the default size used by generic code.
+config USE_ICC_SYSREGS_FOR_IRQFLAGS
+ bool "Use ICC system registers for IRQ masking"
+ select ARM_GIC_V3
+ help
+ Using the ICC system registers for IRQ masking makes it possible
+ to simulate NMI on ARM64 systems. This allows several interesting
+ features (especially debug features) to be used on these systems.
+
+ Say Y here to implement IRQ masking using ICC system
+ registers when the GIC System Registers are available. The changes
+ are applied dynamically using the alternatives system so it is safe
+ to enable this option on systems with older interrupt controllers.
+
+ If unsure, say N.
+
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on COMPAT
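How the masking trick works, in brief: the GIC signals an interrupt to the CPU
only when the interrupt's priority is numerically lower (i.e. logically higher)
than the value in ICC_PMR_EL1. A minimal C sketch of that rule, using the
constants introduced later in this patch (illustrative only, not part of the
patch):

    /* Sketch of the GIC priority-mask rule; not kernel code. */
    #include <stdbool.h>
    #include <stdint.h>

    #define ICC_PMR_EL1_UNMASKED  0xf0
    #define ICC_PMR_EL1_MASKED    0xb0  /* 0xf0 with bit 6 cleared */
    #define GICD_INT_DEF_PRI      0xc0  /* normal IRQs, per this series */

    static bool gic_signals(uint8_t prio, uint8_t pmr)
    {
        return prio < pmr;  /* lower value == higher priority */
    }

    /*
     * gic_signals(GICD_INT_DEF_PRI, ICC_PMR_EL1_UNMASKED) -> true
     * gic_signals(GICD_INT_DEF_PRI, ICC_PMR_EL1_MASKED)   -> false
     * Writing 0xb0 to the PMR therefore masks normal IRQs while leaving
     * room for higher-priority (NMI-like) sources below 0xb0.
     */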
@@ -76,6 +76,11 @@ static inline u64 gic_read_iar_cavium_thunderx(void)
return irqstat;
}
+static inline u32 gic_read_pmr(void)
+{
+ return read_sysreg_s(SYS_ICC_PMR_EL1);
+}
+
static inline void gic_write_pmr(u32 val)
{
write_sysreg_s(val, SYS_ICC_PMR_EL1);
@@ -145,5 +150,37 @@ static inline void gic_write_bpr1(u32 val)
#define gits_write_vpendbaser(v, c) writeq_relaxed(v, c)
#define gits_read_vpendbaser(c) readq_relaxed(c)
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+static inline void gic_start_pmr_masking(void)
+{
+ if (cpus_have_const_cap(ARM64_HAS_SYSREG_GIC_CPUIF)) {
+ gic_write_pmr(ICC_PMR_EL1_MASKED);
+ asm volatile ("msr daifclr, #2" : : : "memory");
+ }
+}
+
+static inline u32 gic_pmr_save_and_unmask(void)
+{
+ if (cpus_have_const_cap(ARM64_HAS_SYSREG_GIC_CPUIF)) {
+ u32 old;
+
+ old = gic_read_pmr();
+ gic_write_pmr(ICC_PMR_EL1_UNMASKED);
+ dsb(sy);
+
+ return old;
+ } else {
+ /* Idle priority, no masking */
+ return ICC_PMR_EL1_UNMASKED;
+ }
+}
+
+static inline void gic_pmr_restore(u32 pmr)
+{
+ if (cpus_have_const_cap(ARM64_HAS_SYSREG_GIC_CPUIF))
+ gic_write_pmr(pmr);
+}
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_ARCH_GICV3_H */
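The helpers above are meant to be used as a save/unmask ... restore pair
around a window that must stay reachable by normal interrupts even if the
caller had masked them at the PMR; the KVM world-switch hunk later in this
patch is the user. A minimal usage sketch, assuming a caller built with
CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS:

    u32 pmr;

    pmr = gic_pmr_save_and_unmask();  /* lift any PMR mask, keep old value */
    /* ... window where lower-priority interrupts may be signalled ... */
    gic_pmr_restore(pmr);             /* reinstate the caller's mask */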
@@ -23,6 +23,7 @@
#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H
+#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
@@ -63,12 +64,32 @@
/*
* Enable and disable interrupts.
*/
- .macro disable_irq
+ .macro disable_irq, tmp
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+ mov \tmp, #ICC_PMR_EL1_MASKED
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
msr daifset, #2
+alternative_else
+ msr_s SYS_ICC_PMR_EL1, \tmp
+alternative_endif
+#else
+ msr daifset, #2
+#endif
.endm
- .macro enable_irq
+ .macro enable_irq, tmp
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+ mov \tmp, #ICC_PMR_EL1_UNMASKED
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
msr daifclr, #2
+ nop
+alternative_else
+ msr_s SYS_ICC_PMR_EL1, \tmp
+ dsb sy
+alternative_endif
+#else
+ msr daifclr, #2
+#endif
.endm
.macro save_and_disable_irq, flags
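Note that disable_irq and enable_irq now take a scratch register, so every
assembly call site has to be updated together with the macros; the entry.S
hunks below do exactly that, e.g.:

    disable_irq x21    // was: disable_irq
    enable_irq  x0     // was: enable_irq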
@@ -18,9 +18,24 @@
#include <linux/irqflags.h>
+#ifndef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+
#define DAIF_PROCCTX 0
#define DAIF_PROCCTX_NOIRQ PSR_I_BIT
+#else
+
+#define DAIF_PROCCTX \
+ (cpus_have_const_cap(ARM64_HAS_SYSREG_GIC_CPUIF) ? \
+ MAKE_ARCH_FLAGS(0, ICC_PMR_EL1_UNMASKED) : \
+ 0)
+
+#define DAIF_PROCCTX_NOIRQ \
+ (cpus_have_const_cap(ARM64_HAS_SYSREG_GIC_CPUIF) ? \
+ MAKE_ARCH_FLAGS(0, ICC_PMR_EL1_MASKED) : \
+ PSR_I_BIT)
+#endif
+
/* mask/save/unmask/restore all exceptions, including interrupts. */
static inline void local_daif_mask(void)
{
@@ -36,11 +51,8 @@ static inline unsigned long local_daif_save(void)
{
unsigned long flags;
- asm volatile(
- "mrs %0, daif // local_daif_save\n"
- : "=r" (flags)
- :
- : "memory");
+ flags = arch_local_save_flags();
+
local_daif_mask();
return flags;
@@ -54,17 +66,21 @@ static inline void local_daif_unmask(void)
:
:
: "memory");
+
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+ /* Unmask IRQs in PMR if needed */
+ if (cpus_have_const_cap(ARM64_HAS_SYSREG_GIC_CPUIF))
+ arch_local_irq_enable();
+#endif
}
static inline void local_daif_restore(unsigned long flags)
{
if (!arch_irqs_disabled_flags(flags))
trace_hardirqs_on();
- asm volatile(
- "msr daif, %0 // local_daif_restore"
- :
- : "r" (flags)
- : "memory");
+
+ arch_local_irq_restore(flags);
+
if (arch_irqs_disabled_flags(flags))
trace_hardirqs_off();
}
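With this change both the DAIF_PROCCTX values and saved flags flow through
arch_local_irq_restore(), so a single flags word carries the DAIF bits and
the PMR state together. What the restore now does, worked out from the
macros in the irqflags.h hunk below (illustrative, not part of the patch):

    /*
     * Without CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS:
     *   DAIF_PROCCTX == 0, so this is the old "msr daif" write of 0.
     * With it, on a CPU with ARM64_HAS_SYSREG_GIC_CPUIF:
     *   DAIF_PROCCTX == MAKE_ARCH_FLAGS(0, ICC_PMR_EL1_UNMASKED) == 1,
     *   so the restore writes DAIF = 0 and ICC_PMR_EL1 = 0xf0.
     */
    local_daif_restore(DAIF_PROCCTX);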
@@ -40,7 +40,12 @@
efi_virtmap_unload(); \
})
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+#define ARCH_EFI_IRQ_FLAGS_MASK \
+ (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | ARCH_FLAG_PMR_EN)
+#else
#define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
+#endif
/* arch specific definitions used by the stub code */
@@ -18,7 +18,10 @@
#ifdef __KERNEL__
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
#include <asm/ptrace.h>
+#include <asm/sysreg.h>
/*
* Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
@@ -33,6 +36,7 @@
* unmask it at all other times.
*/
+#ifndef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
/*
* CPU interrupt mask handling.
*/
@@ -96,5 +100,126 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
{
return flags & PSR_I_BIT;
}
+
+static inline void maybe_switch_to_sysreg_gic_cpuif(void) {}
+
+#else /* CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS */
+
+#define ARCH_FLAG_PMR_EN 0x1
+
+#define MAKE_ARCH_FLAGS(daif, pmr) \
+ ((daif) | (((pmr) >> ICC_PMR_EL1_EN_SHIFT) & ARCH_FLAG_PMR_EN))
+
+#define ARCH_FLAGS_GET_PMR(flags) \
+ ((((flags) & ARCH_FLAG_PMR_EN) << ICC_PMR_EL1_EN_SHIFT) \
+ | ICC_PMR_EL1_MASKED)
+
+#define ARCH_FLAGS_GET_DAIF(flags) ((flags) & ~ARCH_FLAG_PMR_EN)
+
+/*
+ * CPU interrupt mask handling.
+ */
+static inline unsigned long arch_local_irq_save(void)
+{
+ unsigned long flags, masked = ICC_PMR_EL1_MASKED;
+ unsigned long pmr = 0;
+
+ asm volatile(ALTERNATIVE(
+ "mrs %0, daif // arch_local_irq_save\n"
+ "msr daifset, #2\n"
+ "mov %1, #" __stringify(ICC_PMR_EL1_UNMASKED),
+ /* --- */
+ "mrs %0, daif\n"
+ "mrs_s %1, " __stringify(SYS_ICC_PMR_EL1) "\n"
+ "msr_s " __stringify(SYS_ICC_PMR_EL1) ", %2",
+ ARM64_HAS_SYSREG_GIC_CPUIF)
+ : "=&r" (flags), "=&r" (pmr)
+ : "r" (masked)
+ : "memory");
+
+ return MAKE_ARCH_FLAGS(flags, pmr);
+}
+
+static inline void arch_local_irq_enable(void)
+{
+ unsigned long unmasked = ICC_PMR_EL1_UNMASKED;
+
+ asm volatile(ALTERNATIVE(
+ "msr daifclr, #2 // arch_local_irq_enable\n"
+ "nop",
+ "msr_s " __stringify(SYS_ICC_PMR_EL1) ",%0\n"
+ "dsb sy",
+ ARM64_HAS_SYSREG_GIC_CPUIF)
+ :
+ : "r" (unmasked)
+ : "memory");
+}
+
+static inline void arch_local_irq_disable(void)
+{
+ unsigned long masked = ICC_PMR_EL1_MASKED;
+
+ asm volatile(ALTERNATIVE(
+ "msr daifset, #2 // arch_local_irq_disable",
+ "msr_s " __stringify(SYS_ICC_PMR_EL1) ",%0",
+ ARM64_HAS_SYSREG_GIC_CPUIF)
+ :
+ : "r" (masked)
+ : "memory");
+}
+
+/*
+ * Save the current interrupt enable state.
+ */
+static inline unsigned long arch_local_save_flags(void)
+{
+ unsigned long flags;
+ unsigned long pmr = 0;
+
+ asm volatile(ALTERNATIVE(
+ "mrs %0, daif // arch_local_save_flags\n"
+ "mov %1, #" __stringify(ICC_PMR_EL1_UNMASKED),
+ "mrs %0, daif\n"
+ "mrs_s %1, " __stringify(SYS_ICC_PMR_EL1),
+ ARM64_HAS_SYSREG_GIC_CPUIF)
+ : "=r" (flags), "=r" (pmr)
+ :
+ : "memory");
+
+ return MAKE_ARCH_FLAGS(flags, pmr);
+}
+
+/*
+ * restore saved IRQ state
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ unsigned long pmr = ARCH_FLAGS_GET_PMR(flags);
+
+ flags = ARCH_FLAGS_GET_DAIF(flags);
+
+ asm volatile(ALTERNATIVE(
+ "msr daif, %0 // arch_local_irq_restore\n"
+ "nop\n"
+ "nop",
+ "msr daif, %0\n"
+ "msr_s " __stringify(SYS_ICC_PMR_EL1) ",%1\n"
+ "dsb sy",
+ ARM64_HAS_SYSREG_GIC_CPUIF)
+ :
+ : "r" (flags), "r" (pmr)
+ : "memory");
+}
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return (ARCH_FLAGS_GET_DAIF(flags) & (PSR_I_BIT)) |
+ !(ARCH_FLAGS_GET_PMR(flags) & ICC_PMR_EL1_EN_BIT);
+}
+
+void maybe_switch_to_sysreg_gic_cpuif(void);
+
+#endif /* CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS */
+
#endif
#endif
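For the record, the MAKE_ARCH_FLAGS()/ARCH_FLAGS_GET_PMR() encoding is
lossless for the two PMR values the kernel actually writes:

    /*
     * MAKE_ARCH_FLAGS(0, 0xf0): (0xf0 >> 6) & 1 == 1 -> flag bit 0 set.
     * ARCH_FLAGS_GET_PMR(1):    (1 << 6) | 0xb0 == 0xf0.     Round trip.
     * MAKE_ARCH_FLAGS(0, 0xb0): (0xb0 >> 6) & 1 == 0 -> flag bit 0 clear.
     * ARCH_FLAGS_GET_PMR(0):    (0 << 6) | 0xb0 == 0xb0.     Round trip.
     * Only bit 6 of the PMR is preserved; that is sufficient because the
     * kernel only ever writes ICC_PMR_EL1_UNMASKED (0xf0) or
     * ICC_PMR_EL1_MASKED (0xb0) to the register.
     */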
@@ -137,6 +137,10 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
memset(regs, 0, sizeof(*regs));
forget_syscall(regs);
regs->pc = pc;
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+ /* Have IRQs enabled by default */
+ regs->pmr_save = ICC_PMR_EL1_UNMASKED;
+#endif
}
static inline void start_thread(struct pt_regs *regs, unsigned long pc,
@@ -25,6 +25,12 @@
#define CurrentEL_EL1 (1 << 2)
#define CurrentEL_EL2 (2 << 2)
+/* PMR values used to mask/unmask interrupts */
+#define ICC_PMR_EL1_EN_SHIFT 6
+#define ICC_PMR_EL1_EN_BIT (1 << ICC_PMR_EL1_EN_SHIFT) /* PMR IRQ enable */
+#define ICC_PMR_EL1_UNMASKED 0xf0
+#define ICC_PMR_EL1_MASKED (ICC_PMR_EL1_UNMASKED ^ ICC_PMR_EL1_EN_BIT)
+
/* AArch32-specific ptrace requests */
#define COMPAT_PTRACE_GETREGS 12
#define COMPAT_PTRACE_SETREGS 13
@@ -136,7 +142,7 @@ struct pt_regs {
#endif
u64 orig_addr_limit;
- u64 unused; // maintain 16 byte alignment
+ u64 pmr_save;
u64 stackframe[2];
};
@@ -171,8 +177,14 @@ static inline void forget_syscall(struct pt_regs *regs)
#define processor_mode(regs) \
((regs)->pstate & PSR_MODE_MASK)
+#ifndef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
#define interrupts_enabled(regs) \
(!((regs)->pstate & PSR_I_BIT))
+#else
+#define interrupts_enabled(regs) \
+ ((!((regs)->pstate & PSR_I_BIT)) && \
+ ((regs)->pmr_save & ICC_PMR_EL1_EN_BIT))
+#endif
#define fast_interrupts_enabled(regs) \
(!((regs)->pstate & PSR_F_BIT))
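A worked example for the new interrupts_enabled(): a task interrupted while
masked at the PMR has PSR.I clear but pmr_save == ICC_PMR_EL1_MASKED, and
must not be reported as interruptible:

    /*
     * pstate = 0 (PSR.I clear), pmr_save = 0xb0 (EN bit clear)
     *   -> interrupts_enabled(regs) == 0   (masked at the PMR)
     * pstate = 0 (PSR.I clear), pmr_save = 0xf0 (EN bit set)
     *   -> interrupts_enabled(regs) != 0   (fully enabled)
     */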
@@ -75,6 +75,7 @@ int main(void)
DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0));
DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
+ DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save));
DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe));
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
BLANK();
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/irqchip/arm-gic-v3.h>
#include <asm/alternative.h>
#include <asm/assembler.h>
@@ -210,6 +211,16 @@ alternative_else_nop_endif
msr sp_el0, tsk
.endif
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+ /* Save pmr */
+alternative_if ARM64_HAS_SYSREG_GIC_CPUIF
+ mrs_s x20, SYS_ICC_PMR_EL1
+alternative_else
+ mov x20, #ICC_PMR_EL1_UNMASKED
+alternative_endif
+ str x20, [sp, #S_PMR_SAVE]
+#endif
+
/*
* Registers that may be useful after this macro is invoked:
*
@@ -220,9 +231,9 @@ alternative_else_nop_endif
.endm
.macro kernel_exit, el
- .if \el != 0
disable_daif
+ .if \el != 0
/* Restore the task's original addr_limit. */
ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
str x20, [tsk, #TSK_TI_ADDR_LIMIT]
@@ -230,6 +241,15 @@ alternative_else_nop_endif
/* No need to restore UAO, it will be restored from SPSR_EL1 */
.endif
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+ /* Restore pmr, ensuring IRQs are off before restoring context. */
+alternative_if ARM64_HAS_SYSREG_GIC_CPUIF
+ ldr x20, [sp, #S_PMR_SAVE]
+ msr_s SYS_ICC_PMR_EL1, x20
+ dsb sy
+alternative_else_nop_endif
+#endif
+
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
.if \el == 0
ct_user_enter
@@ -820,7 +840,7 @@ ENDPROC(el0_error)
* and this includes saving x0 back into the kernel stack.
*/
ret_fast_syscall:
- disable_daif
+ disable_irq x21 // disable interrupts
str x0, [sp, #S_X0] // returned x0
ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for syscall tracing
and x2, x1, #_TIF_SYSCALL_WORK
@@ -830,7 +850,7 @@ ret_fast_syscall:
enable_step_tsk x1, x2
kernel_exit 0
ret_fast_syscall_trace:
- enable_daif
+ enable_irq x0 // enable interrupts
b __sys_trace_return_skipped // we already saved x0
/*
@@ -848,7 +868,7 @@ work_pending:
* "slow" syscall return path.
*/
ret_to_user:
- disable_daif
+ disable_irq x21 // disable interrupts
ldr x1, [tsk, #TSK_TI_FLAGS]
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
@@ -563,6 +563,44 @@ set_cpu_boot_mode_flag:
ret
ENDPROC(set_cpu_boot_mode_flag)
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+/*
+ * void maybe_switch_to_sysreg_gic_cpuif(void)
+ *
+ * Enable interrupt controller system register access if this feature
+ * has been detected by the alternatives system.
+ *
+ * Before we jump into generic code we must enable interrupt controller system
+ * register access because this is required by the irqflags macros. We must
+ * also mask interrupts at the PMR and unmask them within the PSR. That leaves
+ * us set up and ready for the kernel to make its first call to
+ * arch_local_irq_enable().
+
+ENTRY(maybe_switch_to_sysreg_gic_cpuif)
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+ b 1f
+alternative_else
+ mrs_s x0, SYS_ICC_SRE_EL1
+alternative_endif
+ orr x0, x0, #1
+ msr_s SYS_ICC_SRE_EL1, x0 // Set ICC_SRE_EL1.SRE==1
+ isb // Make sure SRE is now set
+ mrs x0, daif
+ tbz x0, #7, no_mask_pmr // Are interrupts on?
+ mov x0, #ICC_PMR_EL1_MASKED
+ msr_s SYS_ICC_PMR_EL1, x0 // Prepare for unmask of I bit
+ msr daifclr, #2 // Clear the I bit
+ b 1f
+no_mask_pmr:
+ mov x0, #ICC_PMR_EL1_UNMASKED
+ msr_s SYS_ICC_PMR_EL1, x0
+1:
+ ret
+ENDPROC(maybe_switch_to_sysreg_gic_cpuif)
+#endif
+
/*
* These values are written with the MMU off, but read with the MMU on.
* Writers will invalidate the corresponding address, discarding up to a
@@ -66,6 +66,8 @@
EXPORT_SYMBOL(__stack_chk_guard);
#endif
+#include <asm/arch_gicv3.h>
+
/*
* Function pointers to optional machine specific functions
*/
@@ -224,6 +226,7 @@ void __show_regs(struct pt_regs *regs)
print_symbol("pc : %s\n", regs->pc);
print_symbol("lr : %s\n", lr);
printk("sp : %016llx\n", sp);
+ printk("pmr_save: %08llx\n", regs->pmr_save);
i = top_reg;
@@ -340,6 +343,9 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
} else {
memset(childregs, 0, sizeof(struct pt_regs));
childregs->pstate = PSR_MODE_EL1h;
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+ childregs->pmr_save = ICC_PMR_EL1_UNMASKED;
+#endif
if (IS_ENABLED(CONFIG_ARM64_UAO) &&
cpus_have_const_cap(ARM64_HAS_UAO))
childregs->pstate |= PSR_UAO_BIT;
@@ -221,6 +221,8 @@ asmlinkage void secondary_start_kernel(void)
struct mm_struct *mm = &init_mm;
unsigned int cpu;
+ maybe_switch_to_sysreg_gic_cpuif();
+
cpu = task_cpu(current);
set_my_cpu_offset(per_cpu_offset(cpu));
@@ -459,6 +461,12 @@ void __init smp_prepare_boot_cpu(void)
* and/or scheduling is enabled.
*/
apply_alternatives_early();
+
+ /*
+ * Conditionally switch to GIC PMR for interrupt masking (this
+ * will be a nop if we are using normal interrupt masking)
+ */
+ maybe_switch_to_sysreg_gic_cpuif();
}
static u64 __init of_get_cpu_mpidr(struct device_node *dn)
@@ -42,7 +42,27 @@
.endm
ENTRY(__vhe_hyp_call)
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+alternative_if ARM64_HAS_SYSREG_GIC_CPUIF
+ /*
+ * In non-VHE, trapping to EL2 will set the PSR.I bit.
+ * Force it here whenever we are playing with PMR.
+ */
+ str x19, [sp, #-16]!
+ mrs x19, daif
+ msr daifset, #2
+alternative_else_nop_endif
+#endif
+
do_el2_call
+
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+alternative_if ARM64_HAS_SYSREG_GIC_CPUIF
+ msr daif, x19
+ ldr x19, [sp], #16
+alternative_else_nop_endif
+#endif
+
/*
* We used to rely on having an exception return to get
* an implicit isb. In the E2H case, we don't have it anymore.
@@ -18,6 +18,9 @@
#include <linux/types.h>
#include <linux/jump_label.h>
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+#include <asm/arch_gicv3.h>
+#endif
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
@@ -290,6 +293,19 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *guest_ctxt;
bool fp_enabled;
u64 exit_code;
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+ u32 pmr_save;
+
+ /*
+ * Having IRQs masked via PMR when entering the guest means the GIC
+ * will not signal lower-priority interrupts to the CPU, and the
+ * only way to get out will be via guest exceptions.
+ * Naturally, we want to avoid this.
+ */
+ pmr_save = gic_pmr_save_and_unmask();
+#endif
vcpu = kern_hyp_va(vcpu);
write_sysreg(vcpu, tpidr_el2);
@@ -394,6 +410,11 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
*/
__debug_cond_restore_host_state(vcpu);
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+ /* PMR was unmasked, no need for dsb */
+ gic_pmr_restore(pmr_save);
+#endif
+
return exit_code;
}
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/irqchip/arm-gic-v3.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
@@ -47,11 +48,33 @@
* cpu_do_idle()
*
* Idle the processor (wait for interrupt).
+ *
+ * If CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS is set we must do additional
+ * work to ensure that interrupts are not masked at the PMR (because the
+ * core will not wake up if we block the wake up signal in the interrupt
+ * controller).
*/
ENTRY(cpu_do_idle)
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+#endif
+ dsb sy // WFI may enter a low-power mode
+ wfi
+ ret
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+alternative_else
+ mrs x0, daif // save I bit
+ msr daifset, #2 // set I bit
+ mrs_s x1, SYS_ICC_PMR_EL1 // save PMR
+alternative_endif
+ mov x2, #ICC_PMR_EL1_UNMASKED
+ msr_s SYS_ICC_PMR_EL1, x2 // unmask at PMR
dsb sy // WFI may enter a low-power mode
wfi
+ msr_s SYS_ICC_PMR_EL1, x1 // restore PMR
+ msr daif, x0 // restore I bit
ret
+#endif
ENDPROC(cpu_do_idle)
#ifdef CONFIG_CPU_PM
@@ -60,7 +60,7 @@
#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
-#define LPI_PROP_DEFAULT_PRIO 0xa0
+#define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI
/*
* Collection structure - just an ID, and a redistributor address to
@@ -71,9 +71,6 @@ struct gic_chip_data {
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
-/* Our default, arbitrary priority value. Linux only uses one anyway. */
-#define DEFAULT_PMR_VALUE 0xf0
-
static inline unsigned int gic_irq(struct irq_data *d)
{
return d->hwirq;
@@ -348,48 +345,55 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
{
u32 irqnr;
- do {
- irqnr = gic_read_iar();
+ irqnr = gic_read_iar();
- if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
- int err;
+#ifdef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+ isb();
+ /* Masking IRQs before the IAR read would prevent acking the current interrupt */
+ gic_start_pmr_masking();
+#endif
- if (static_key_true(&supports_deactivate))
+ if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
+ int err;
+
+ if (static_key_true(&supports_deactivate))
+ gic_write_eoir(irqnr);
+ else {
+#ifndef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
+ isb();
+#endif
+ }
+
+ err = handle_domain_irq(gic_data.domain, irqnr, regs);
+ if (err) {
+ WARN_ONCE(true, "Unexpected interrupt received!\n");
+ if (static_key_true(&supports_deactivate)) {
+ if (irqnr < 8192)
+ gic_write_dir(irqnr);
+ } else {
gic_write_eoir(irqnr);
- else
- isb();
-
- err = handle_domain_irq(gic_data.domain, irqnr, regs);
- if (err) {
- WARN_ONCE(true, "Unexpected interrupt received!\n");
- if (static_key_true(&supports_deactivate)) {
- if (irqnr < 8192)
- gic_write_dir(irqnr);
- } else {
- gic_write_eoir(irqnr);
- }
}
- continue;
}
- if (irqnr < 16) {
- gic_write_eoir(irqnr);
- if (static_key_true(&supports_deactivate))
- gic_write_dir(irqnr);
+ return;
+ }
+ if (irqnr < 16) {
+ gic_write_eoir(irqnr);
+ if (static_key_true(&supports_deactivate))
+ gic_write_dir(irqnr);
#ifdef CONFIG_SMP
- /*
- * Unlike GICv2, we don't need an smp_rmb() here.
- * The control dependency from gic_read_iar to
- * the ISB in gic_write_eoir is enough to ensure
- * that any shared data read by handle_IPI will
- * be read after the ACK.
- */
- handle_IPI(irqnr, regs);
+ /*
+ * Unlike GICv2, we don't need an smp_rmb() here.
+ * The control dependency from gic_read_iar to
+ * the ISB in gic_write_eoir is enough to ensure
+ * that any shared data read by handle_IPI will
+ * be read after the ACK.
+ */
+ handle_IPI(irqnr, regs);
#else
- WARN_ONCE(true, "Unexpected SGI received!\n");
+ WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
- continue;
- }
- } while (irqnr != ICC_IAR1_EL1_SPURIOUS);
+ return;
+ }
}
static void __init gic_dist_init(void)
@@ -543,8 +547,10 @@ static void gic_cpu_sys_reg_init(void)
if (!gic_enable_sre())
pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
+#ifndef CONFIG_USE_ICC_SYSREGS_FOR_IRQFLAGS
/* Set priority mask register */
- gic_write_pmr(DEFAULT_PMR_VALUE);
+ gic_write_pmr(ICC_PMR_EL1_UNMASKED);
+#endif
/*
* Some firmwares hand over to the kernel with the BPR changed from
@@ -13,6 +13,12 @@
#include <linux/types.h>
#include <linux/ioport.h>
+#define GICD_INT_DEF_PRI 0xc0
+#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
+ (GICD_INT_DEF_PRI << 16) |\
+ (GICD_INT_DEF_PRI << 8) |\
+ GICD_INT_DEF_PRI)
+
enum gic_type {
GIC_V2,
GIC_V3,
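Raising the default interrupt priority from 0xa0 to 0xc0 is what makes the
scheme work: the old value sat numerically below ICC_PMR_EL1_MASKED, so a
PMR of 0xb0 could never have blocked it. The resulting priority map, as a
summary sketch (not part of the patch):

    /*
     * Lower value == higher priority:
     *   < 0xb0 : reserved for NMI-like sources; still delivered when
     *            ICC_PMR_EL1 == ICC_PMR_EL1_MASKED (0xb0)
     *   0xc0   : GICD_INT_DEF_PRI, all normal Linux interrupts; blocked
     *            when PMR == 0xb0, delivered when PMR == 0xf0
     *   0xf0   : ICC_PMR_EL1_UNMASKED, the idle mask value
     */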
@@ -65,11 +65,6 @@
#define GICD_INT_EN_CLR_X32 0xffffffff
#define GICD_INT_EN_SET_SGI 0x0000ffff
#define GICD_INT_EN_CLR_PPI 0xffff0000
-#define GICD_INT_DEF_PRI 0xa0
-#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
- (GICD_INT_DEF_PRI << 16) |\
- (GICD_INT_DEF_PRI << 8) |\
- GICD_INT_DEF_PRI)
#define GICH_HCR 0x0
#define GICH_VTR 0x4