@@ -85,6 +85,7 @@ static inline bool should_resched(int preempt_offset)
void preempt_schedule(void);
void preempt_schedule_notrace(void);
+void raw_irqentry_exit_cond_resched(void);
#ifdef CONFIG_PREEMPT_DYNAMIC
DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
@@ -95,6 +95,14 @@ static inline bool arm64_irqentry_exit_need_resched(void)
return true;
}
+void raw_irqentry_exit_cond_resched(void)
+{
+ if (!preempt_count()) {
+ if (need_resched() && arm64_irqentry_exit_need_resched())
+ preempt_schedule_irq();
+ }
+}
+
/*
* Handle IRQ/context state management when exiting to kernel mode.
* After this function returns it is not safe to call regular kernel code,
@@ -119,13 +127,8 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs,
return;
}
- if (IS_ENABLED(CONFIG_PREEMPTION)) {
- if (!preempt_count()) {
- if (need_resched() &&
- arm64_irqentry_exit_need_resched())
- preempt_schedule_irq();
- }
- }
+ if (IS_ENABLED(CONFIG_PREEMPTION))
+ raw_irqentry_exit_cond_resched();
trace_hardirqs_on();
} else {
Extract the arm64 resched logic into a raw_irqentry_exit_cond_resched() function, which makes the code clearer when switching to the generic entry framework. No functional changes. Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com> --- arch/arm64/include/asm/preempt.h | 1 + arch/arm64/kernel/entry-common.c | 17 ++++++++++------- 2 files changed, 11 insertions(+), 7 deletions(-)