@@ -17,6 +17,8 @@
#define DAIF_PROCCTX_NOIRQ (PSR_I_BIT | PSR_F_BIT)
#define DAIF_ERRCTX (PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
#define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
+#define DAIF_ALLINT_MASK \
+ (system_uses_nmi() ? (ALLINT_ALLINT | DAIF_MASK) : (DAIF_MASK))
/*
* For Arm64 processor support Armv8.8 or later, kernel supports three types
@@ -187,13 +187,13 @@ static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
kcb->saved_irqflag = regs->pstate & DAIF_MASK;
- regs->pstate |= DAIF_MASK;
+ regs->pstate |= DAIF_ALLINT_MASK;
}
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
- regs->pstate &= ~DAIF_MASK;
+ regs->pstate &= ~DAIF_ALLINT_MASK;
regs->pstate |= kcb->saved_irqflag;
}
Keep NMIs masked while executing the instruction out of line; otherwise, adding a kprobe to a function invoked while handling an NMI will cause a kprobe re-entry bug and a kernel panic. Signed-off-by: Liao Chang <liaochang1@huawei.com> --- arch/arm64/include/asm/daifflags.h | 2 ++ arch/arm64/kernel/probes/kprobes.c | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-)