@@ -3706,8 +3706,7 @@ asmlinkage __visible void schedule_tail(
* finish_task_switch() for details.
*
* finish_task_switch() will drop rq->lock() and lower preempt_count
- * and the preempt_enable() will end up enabling preemption (on
- * PREEMPT_COUNT kernels).
+ * and the preempt_enable() will end up enabling preemption.
*/
rq = finish_task_switch(prev);
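
For context, a minimal compilable userspace model of the handoff that comment describes: the new task starts with an elevated count, finish_task_switch() drops one level, and the trailing preempt_enable() drops the last one. The *_sketch helpers are stand-ins invented here, not kernel code; only FORK_PREEMPT_COUNT's value (2) is borrowed from the kernel headers.

        /* Illustrative userspace model of the fork-path preempt count
         * handoff.  With PREEMPT_COUNT now unconditional, the final
         * preempt_enable() always decrements a real counter. */
        #include <assert.h>
        #include <stdio.h>

        #define FORK_PREEMPT_COUNT 2    /* value borrowed from the kernel */

        static int preempt_count_sketch;

        static void preempt_enable_sketch(void)
        {
                /* Preemption becomes possible once the count hits zero. */
                if (--preempt_count_sketch == 0)
                        printf("preemption enabled\n");
        }

        static void finish_task_switch_sketch(void)
        {
                /* Models "drop rq->lock and lower preempt_count": one
                 * level comes off here, the caller drops the rest. */
                preempt_count_sketch--;
        }

        int main(void)
        {
                preempt_count_sketch = FORK_PREEMPT_COUNT; /* new task starts elevated */
                finish_task_switch_sketch();
                preempt_enable_sketch();        /* ends up enabling preemption */
                assert(preempt_count_sketch == 0);
                return 0;
        }
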
@@ -7311,9 +7310,6 @@ void __cant_sleep(const char *file, int
if (irqs_disabled())
return;
- if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
- return;
-
if (preempt_count() > preempt_offset)
return;
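
To make the resulting control flow concrete, a hedged userspace sketch of the check order after this hunk. Only the two early returns visible above come from the patch context; the *_sketch variables, the demo in main() and the message text are local stand-ins modeled on mainline, not the function's exact body.

        #include <stdbool.h>
        #include <stdio.h>

        static bool irqs_disabled_sketch;
        static int preempt_count_sketch;

        static void cant_sleep_sketch(const char *file, int line, int preempt_offset)
        {
                if (irqs_disabled_sketch)
                        return;         /* atomic via disabled interrupts */

                /* The IS_ENABLED(CONFIG_PREEMPT_COUNT) bail-out is gone:
                 * preempt_count() is always a real value now, so this
                 * check can no longer be silently skipped. */
                if (preempt_count_sketch > preempt_offset)
                        return;         /* caller is atomic, as required */

                fprintf(stderr, "BUG: assuming atomic context at %s:%d\n",
                        file, line);
        }

        int main(void)
        {
                cant_sleep_sketch(__FILE__, __LINE__, 0);  /* preemptible: fires */
                preempt_count_sketch = 1;                  /* enter atomic region */
                cant_sleep_sketch(__FILE__, __LINE__, 0);  /* atomic: silent */
                return 0;
        }
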
@@ -1320,7 +1320,6 @@ config DEBUG_LOCKDEP
config DEBUG_ATOMIC_SLEEP
bool "Sleep inside atomic section checking"
- select PREEMPT_COUNT
depends on DEBUG_KERNEL
help
If you say Y here, various routines which may sleep will become very
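
The select was only ever needed so that the sleep-in-atomic checks read a real preempt count; with PREEMPT_COUNT unconditional that now holds on every kernel. A small illustrative model of the kind of check DEBUG_ATOMIC_SLEEP enables (all names are local stand-ins, not the kernel's ___might_sleep()):

        #include <stdio.h>

        static int preempt_count_sketch;    /* always present, never stubbed to 0 */

        static void might_sleep_sketch(const char *file, int line)
        {
                /* A non-zero count means a sleep here would be a bug. */
                if (preempt_count_sketch != 0)
                        fprintf(stderr,
                                "BUG: sleeping function called from invalid context at %s:%d\n",
                                file, line);
        }

        int main(void)
        {
                might_sleep_sketch(__FILE__, __LINE__);  /* count 0: fine */
                preempt_count_sketch = 1;                /* enter "atomic" section */
                might_sleep_sketch(__FILE__, __LINE__);  /* now it complains */
                return 0;
        }
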
CONFIG_PREEMPT_COUNT is now unconditionally enabled and will be removed.
Clean up the leftovers before doing so.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
---
 kernel/sched/core.c | 6 +-----
 lib/Kconfig.debug   | 1 -
 2 files changed, 1 insertion(+), 6 deletions(-)