Message ID | 20210602133040.334970485@infradead.org (mailing list archive)
---|---
State | Not Applicable
Delegated to | Netdev Maintainers
Series | sched: Cleanup task_struct::state

Context | Check | Description
---|---|---
netdev/tree_selection | success | Not a local patch
On Wed, Jun 02, 2021 at 03:12:27PM +0200, Peter Zijlstra wrote:
> Replace a bunch of 'p->state == TASK_RUNNING' with a new helper:
> task_is_running(p).
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> ---
>  arch/x86/kernel/process.c |    4 ++--
>  block/blk-mq.c            |    2 +-
>  include/linux/sched.h     |    2 ++
>  kernel/locking/lockdep.c  |    2 +-
>  kernel/rcu/tree_plugin.h  |    2 +-
>  kernel/sched/core.c       |    6 +++---
>  kernel/sched/stats.h      |    2 +-
>  kernel/signal.c           |    2 +-
>  kernel/softirq.c          |    3 +--
>  mm/compaction.c           |    2 +-
>  10 files changed, 14 insertions(+), 13 deletions(-)
>
> --- a/arch/x86/kernel/process.c
> +++ b/arch/x86/kernel/process.c
> @@ -931,7 +931,7 @@ unsigned long get_wchan(struct task_stru
>  	unsigned long start, bottom, top, sp, fp, ip, ret = 0;
>  	int count = 0;
>
> -	if (p == current || p->state == TASK_RUNNING)
> +	if (p == current || task_is_running(p))

Looks like this one in get_wchan() has been cargo-culted across most of
arch/ so they'll need fixing up before you rename the struct member.

There's also a weird one in tools/bpf/runqslower/runqslower.bpf.c (!)

Will
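For context, the cargo-culted guard Will refers to has the same shape in
most architectures' get_wchan(). A hedged sketch of the per-arch
conversion, assuming arm64 carries the common
"!p || p == current || p->state == TASK_RUNNING" form of the check
(the exact surrounding code varies by architecture):

--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ ... @@ unsigned long get_wchan(struct task_struct *p)
-	if (!p || p == current || p->state == TASK_RUNNING)
+	if (!p || p == current || task_is_running(p))
 		return 0;

The same one-line substitution applies wherever an arch open-codes the
TASK_RUNNING comparison, which is why all of arch/ has to move before
the struct member can be renamed.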
On Wed, Jun 02, 2021 at 03:59:21PM +0100, Will Deacon wrote:
> On Wed, Jun 02, 2021 at 03:12:27PM +0200, Peter Zijlstra wrote:
> > Replace a bunch of 'p->state == TASK_RUNNING' with a new helper:
> > task_is_running(p).
> >
> > Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> > ---
> >  arch/x86/kernel/process.c |    4 ++--
> >  block/blk-mq.c            |    2 +-
> >  include/linux/sched.h     |    2 ++
> >  kernel/locking/lockdep.c  |    2 +-
> >  kernel/rcu/tree_plugin.h  |    2 +-
> >  kernel/sched/core.c       |    6 +++---
> >  kernel/sched/stats.h      |    2 +-
> >  kernel/signal.c           |    2 +-
> >  kernel/softirq.c          |    3 +--
> >  mm/compaction.c           |    2 +-
> >  10 files changed, 14 insertions(+), 13 deletions(-)
> >
> > --- a/arch/x86/kernel/process.c
> > +++ b/arch/x86/kernel/process.c
> > @@ -931,7 +931,7 @@ unsigned long get_wchan(struct task_stru
> >  	unsigned long start, bottom, top, sp, fp, ip, ret = 0;
> >  	int count = 0;
> >
> > -	if (p == current || p->state == TASK_RUNNING)
> > +	if (p == current || task_is_running(p))
>
> Looks like this one in get_wchan() has been cargo-culted across most of
> arch/ so they'll need fixing up before you rename the struct member.

Yeah, this was x86_64 allmodconfig driven, I've already got a bunch of
robot mail telling me other archs need help, I'll fix it up.

> There's also a weird one in tools/bpf/runqslower/runqslower.bpf.c (!)

I'm tempted to let the bpf people sort their own gunk. This is not an
ABI. I so don't care breaking every script out there.
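For reference, the runqslower case Will flagged is a BPF program that
reads task_struct::state through BTF/CO-RE, so the field name is baked
in at compile time rather than being part of any stable ABI. A hedged
sketch of that access pattern (the helper below is illustrative, not
the actual runqslower code), showing why the eventual rename of the
struct member breaks such out-of-tree readers:

#include "vmlinux.h"
#include <bpf/bpf_core_read.h>

/* Illustrative only: a CO-RE-relocated read of the state field.
 * Renaming task_struct::state to something else means this no longer
 * compiles against a regenerated vmlinux.h until the tool is updated.
 */
static bool bpf_task_is_running(struct task_struct *p)
{
        long state = BPF_CORE_READ(p, state);

        return state == 0;      /* TASK_RUNNING == 0 */
}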
On Wed, 02 Jun 2021, Peter Zijlstra wrote:

> Replace a bunch of 'p->state == TASK_RUNNING' with a new helper:
> task_is_running(p).
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>

Acked-by: Davidlohr Bueso

But afaict ....

> ---
>  arch/x86/kernel/process.c |    4 ++--
>  block/blk-mq.c            |    2 +-
>  include/linux/sched.h     |    2 ++
>  kernel/locking/lockdep.c  |    2 +-
>  kernel/rcu/tree_plugin.h  |    2 +-
>  kernel/sched/core.c       |    6 +++---
>  kernel/sched/stats.h      |    2 +-
>  kernel/signal.c           |    2 +-
>  kernel/softirq.c          |    3 +--
>  mm/compaction.c           |    2 +-
>  10 files changed, 14 insertions(+), 13 deletions(-)

there are also (on top of the already mentioned arch/):

kernel/kcsan/report.c:		const bool is_running = current->state == TASK_RUNNING;
kernel/locking/lockdep.c:	if (p->state == TASK_RUNNING && p != current)
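Converting the kcsan site Davidlohr lists would presumably be the same
mechanical substitution as the rest of the series; a sketch of the
follow-up hunk (not part of this patch):

--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ ... @@
-	const bool is_running = current->state == TASK_RUNNING;
+	const bool is_running = task_is_running(current);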
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -931,7 +931,7 @@ unsigned long get_wchan(struct task_stru
 	unsigned long start, bottom, top, sp, fp, ip, ret = 0;
 	int count = 0;
 
-	if (p == current || p->state == TASK_RUNNING)
+	if (p == current || task_is_running(p))
 		return 0;
 
 	if (!try_get_task_stack(p))
@@ -975,7 +975,7 @@ unsigned long get_wchan(struct task_stru
 			goto out;
 		}
 		fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
-	} while (count++ < 16 && p->state != TASK_RUNNING);
+	} while (count++ < 16 && !task_is_running(p));
 
 out:
 	put_task_stack(p);
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3926,7 +3926,7 @@ int blk_poll(struct request_queue *q, bl
 		if (signal_pending_state(state, current))
 			__set_current_state(TASK_RUNNING);
 
-		if (current->state == TASK_RUNNING)
+		if (task_is_running(current))
 			return 1;
 		if (ret < 0 || !spin)
 			break;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -113,6 +113,8 @@ struct task_group;
 					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
 					 TASK_PARKED)
 
+#define task_is_running(task)		(READ_ONCE((task)->state) == TASK_RUNNING)
+
 #define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)
 
 #define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -760,7 +760,7 @@ static void lockdep_print_held_locks(str
 	 * It's not reliable to print a task's held locks if it's not sleeping
 	 * and it's not the current task.
 	 */
-	if (p->state == TASK_RUNNING && p != current)
+	if (p != current && task_is_running(p))
 		return;
 	for (i = 0; i < depth; i++) {
 		printk(" #%d: ", i);
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2768,7 +2768,7 @@ EXPORT_SYMBOL_GPL(rcu_bind_current_to_no
 #ifdef CONFIG_SMP
 static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
 {
-	return tsp && tsp->state == TASK_RUNNING && !tsp->on_cpu ? "!" : "";
+	return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : "";
 }
 #else // #ifdef CONFIG_SMP
 static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5989,7 +5989,7 @@ static inline void sched_submit_work(str
 {
 	unsigned int task_flags;
 
-	if (!tsk->state)
+	if (task_is_running(tsk))
 		return;
 
 	task_flags = tsk->flags;
@@ -7964,7 +7964,7 @@ int __sched yield_to(struct task_struct
 	if (curr->sched_class != p->sched_class)
 		goto out_unlock;
 
-	if (task_running(p_rq, p) || p->state)
+	if (task_running(p_rq, p) || !task_is_running(p))
 		goto out_unlock;
 
 	yielded = curr->sched_class->yield_to_task(rq, p);
@@ -8167,7 +8167,7 @@ void sched_show_task(struct task_struct
 	pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
 
-	if (p->state == TASK_RUNNING)
+	if (task_is_running(p))
 		pr_cont("  running task    ");
 #ifdef CONFIG_DEBUG_STACK_USAGE
 	free = stack_not_used(p);
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -217,7 +217,7 @@ static inline void sched_info_depart(str
 
 	rq_sched_info_depart(rq, delta);
 
-	if (t->state == TASK_RUNNING)
+	if (task_is_running(t))
 		sched_info_enqueue(rq, t);
 }
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -4719,7 +4719,7 @@ void kdb_send_sig(struct task_struct *t,
 	}
 	new_t = kdb_prev_t != t;
 	kdb_prev_t = t;
-	if (t->state != TASK_RUNNING && new_t) {
+	if (!task_is_running(t) && new_t) {
 		spin_unlock(&t->sighand->siglock);
 		kdb_printf("Process is not RUNNING, sending a signal from "
 			   "kdb risks deadlock\n"
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -92,8 +92,7 @@ static bool ksoftirqd_running(unsigned l
 	if (pending & SOFTIRQ_NOW_MASK)
 		return false;
-	return tsk && (tsk->state == TASK_RUNNING) &&
-		!__kthread_should_park(tsk);
+	return tsk && task_is_running(tsk) && !__kthread_should_park(tsk);
 }
 
 #ifdef CONFIG_TRACE_IRQFLAGS
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1955,7 +1955,7 @@ static inline bool is_via_compact_memory
 
 static bool kswapd_is_running(pg_data_t *pgdat)
 {
-	return pgdat->kswapd && (pgdat->kswapd->state == TASK_RUNNING);
+	return pgdat->kswapd && task_is_running(pgdat->kswapd);
 }
 
 /*
Replace a bunch of 'p->state == TASK_RUNNING' with a new helper:
task_is_running(p).

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/x86/kernel/process.c |    4 ++--
 block/blk-mq.c            |    2 +-
 include/linux/sched.h     |    2 ++
 kernel/locking/lockdep.c  |    2 +-
 kernel/rcu/tree_plugin.h  |    2 +-
 kernel/sched/core.c       |    6 +++---
 kernel/sched/stats.h      |    2 +-
 kernel/signal.c           |    2 +-
 kernel/softirq.c          |    3 +--
 mm/compaction.c           |    2 +-
 10 files changed, 14 insertions(+), 13 deletions(-)