[RFC,08/86] Revert "arm64: Support PREEMPT_DYNAMIC"

Message ID 20231107215742.363031-9-ankur.a.arora@oracle.com
State New
Series Make the kernel preemptible

Commit Message

Ankur Arora Nov. 7, 2023, 9:56 p.m. UTC
This reverts commit 1b2d3451ee50a0968cb9933f726e50b368ba5073.

Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
 arch/arm64/Kconfig               |  1 -
 arch/arm64/include/asm/preempt.h | 19 ++-----------------
 arch/arm64/kernel/entry-common.c | 10 +---------
 3 files changed, 3 insertions(+), 27 deletions(-)

Comments

Steven Rostedt Nov. 7, 2023, 11:17 p.m. UTC | #1
On Tue,  7 Nov 2023 13:56:54 -0800
Ankur Arora <ankur.a.arora@oracle.com> wrote:

> This reverts commit 1b2d3451ee50a0968cb9933f726e50b368ba5073.
> 

I just realized that the maintainers of these patches are not being Cc'd.
If you want comments, you may want to Cc them. (I didn't do that for this
patch).

-- Steve


Mark Rutland Nov. 8, 2023, 3:44 p.m. UTC | #2
On Tue, Nov 07, 2023 at 01:56:54PM -0800, Ankur Arora wrote:
> This reverts commit 1b2d3451ee50a0968cb9933f726e50b368ba5073.

As the author of the commit being reverted, I'd appreciate being Cc'd on
subsequent versions of this patch (and ideally, for the series as a whole).

Mark.


Patch

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 78f20e632712..856d7be2ee45 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -221,7 +221,6 @@ config ARM64
 	select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
-	select HAVE_PREEMPT_DYNAMIC_KEY
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_POSIX_CPU_TIMERS_TASK_WORK
 	select HAVE_FUNCTION_ARG_ACCESS_API
diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index 0159b625cc7f..e83f0982b99c 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -2,7 +2,6 @@
 #ifndef __ASM_PREEMPT_H
 #define __ASM_PREEMPT_H
 
-#include <linux/jump_label.h>
 #include <linux/thread_info.h>
 
 #define PREEMPT_NEED_RESCHED	BIT(32)
@@ -81,24 +80,10 @@ static inline bool should_resched(int preempt_offset)
 }
 
 #ifdef CONFIG_PREEMPTION
-
 void preempt_schedule(void);
+#define __preempt_schedule() preempt_schedule()
 void preempt_schedule_notrace(void);
-
-#ifdef CONFIG_PREEMPT_DYNAMIC
-
-DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
-void dynamic_preempt_schedule(void);
-#define __preempt_schedule()		dynamic_preempt_schedule()
-void dynamic_preempt_schedule_notrace(void);
-#define __preempt_schedule_notrace()	dynamic_preempt_schedule_notrace()
-
-#else /* CONFIG_PREEMPT_DYNAMIC */
-
-#define __preempt_schedule()		preempt_schedule()
-#define __preempt_schedule_notrace()	preempt_schedule_notrace()
-
-#endif /* CONFIG_PREEMPT_DYNAMIC */
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
 #endif /* CONFIG_PREEMPTION */
 
 #endif /* __ASM_PREEMPT_H */
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 0fc94207e69a..5d9c9951562b 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -225,17 +225,9 @@ static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
 		lockdep_hardirqs_on(CALLER_ADDR0);
 }
 
-#ifdef CONFIG_PREEMPT_DYNAMIC
-DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
-#define need_irq_preemption() \
-	(static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
-#else
-#define need_irq_preemption()	(IS_ENABLED(CONFIG_PREEMPTION))
-#endif
-
 static void __sched arm64_preempt_schedule_irq(void)
 {
-	if (!need_irq_preemption())
+	if (!IS_ENABLED(CONFIG_PREEMPTION))
 		return;
 
 	/*
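
The hunks above remove the PREEMPT_DYNAMIC plumbing: before this revert,
need_irq_preemption() was backed by the static key
sk_dynamic_irqentry_exit_cond_resched, so the IRQ-exit preemption check
could be switched at boot (e.g. via the preempt= command-line parameter);
after it, the check folds back to the compile-time
IS_ENABLED(CONFIG_PREEMPTION). What follows is a minimal userspace C
sketch of that trade-off, not kernel code: the kernel implements the
dynamic case with a jump label patched into the instruction stream rather
than a flag read from memory, and the helper names below are illustrative
only.

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_PREEMPTION 1	/* stand-in for the Kconfig symbol */

/*
 * Before the revert: resolved at run time, flipped once during boot
 * (e.g. by preempt=none); modelled here as a plain bool.
 */
static bool sk_dynamic_irqentry_exit_cond_resched = true;

static bool need_irq_preemption_dynamic(void)	/* illustrative name */
{
	return sk_dynamic_irqentry_exit_cond_resched;
}

/*
 * After the revert: a compile-time constant, so the compiler can drop
 * the preemption path entirely when CONFIG_PREEMPTION is not set.
 */
static bool need_irq_preemption_static(void)	/* illustrative name */
{
	return CONFIG_PREEMPTION;
}

int main(void)
{
	/* Model boot-time selection of preempt=none. */
	sk_dynamic_irqentry_exit_cond_resched = false;

	printf("dynamic: %d, compile-time: %d\n",
	       need_irq_preemption_dynamic(),
	       need_irq_preemption_static());
	return 0;
}

The design point the revert gives up: with the static key, both
behaviours ship in a single kernel image and the disabled branch costs
roughly a NOP, while with the compile-time check the choice is fixed when
the kernel is built.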