[RFC,11/20] kthread: Make sure kthread hasn't started while binding it

Message ID 20240726215701.19459-12-frederic@kernel.org (mailing list archive)
State New

Commit Message

Frederic Weisbecker July 26, 2024, 9:56 p.m. UTC
Make sure the kthread is still sleeping in its initial
schedule_preempt_disabled() call, and has not yet entered its function
handler, by the time kthread_bind[_mask]() is called on it. This provides a
sanity check verifying that the task is not randomly blocked later at some
point within its function handler, in which case it could be concurrently
awakened, leaving the call to do_set_cpus_allowed() without any effect until
the next voluntary sleep.
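
For reference, the ordering that this check enforces might look like the
following sketch (the threadfn and CPU number are made up for illustration):

    /* Hypothetical thread function, shown only for the example. */
    static int my_threadfn(void *data)
    {
            /* ... do work, sleep, repeat ... */
            return 0;
    }

    /* The supported ordering: bind before the first wakeup. */
    struct task_struct *t = kthread_create(my_threadfn, NULL, "my_worker");

    if (!IS_ERR(t)) {
            kthread_bind(t, 3);   /* still parked in schedule_preempt_disabled() */
            wake_up_process(t);   /* my_threadfn() only starts running now */
    }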

Rely on the wake-up ordering to ensure that a read of the newly introduced
"started" field returns the expected value:

    TASK A                                   TASK B
    ------                                   ------
    READ kthread->started
    wake_up_process(B)
       rq_lock()
       ...
       rq_unlock() // RELEASE
                                             schedule()
                                                rq_lock() // ACQUIRE
                                                // schedule task B
                                                rq_unlock()
                                                WRITE kthread->started
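
Put differently, the rq lock supplies the release/acquire pairing here. A
conceptual sketch of the guarantee (not the actual scheduler code, which gets
this ordering implicitly from the rq lock rather than from explicit barriers):

    /* TASK A: the kthread_bind[_mask]() caller */
    WARN_ON_ONCE(kthread->started);  /* reads 0: B has not run yet */
    wake_up_process(p);              /* rq_unlock() inside acts as RELEASE */

    /* TASK B: the kthread itself */
    schedule_preempt_disabled();     /* rq_lock() at wakeup acts as ACQUIRE */
    self->started = 1;               /* hence ordered after A's read */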

Similarly, a write to kthread->started performed before any subsequent
voluntary sleep is guaranteed to be visible after wait_task_inactive()
returns in __kthread_bind_mask(), allowing potential misuse of the API to
be reported.
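
For illustration, a hypothetical misuse that the new WARN_ON_ONCE() would
catch (function name and CPU number again made up for the example):

    /* kthread_run() both creates and wakes the thread... */
    struct task_struct *t = kthread_run(my_threadfn, NULL, "my_worker");

    /*
     * ...so my_threadfn() may already be past its initial sleep and
     * blocked somewhere of its own. Binding at this point is too late:
     */
    kthread_bind(t, 3);  /* kthread->started may be 1 -> WARN_ON_ONCE() fires */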

Upcoming patches will make further use of this facility.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
---
 kernel/kthread.c | 7 +++++++
 1 file changed, 7 insertions(+)

Comments

Vlastimil Babka July 30, 2024, 3:20 p.m. UTC | #1
On 7/26/24 11:56 PM, Frederic Weisbecker wrote:
> Make sure the kthread is still sleeping in its initial
> schedule_preempt_disabled() call, and has not yet entered its function
> handler, by the time kthread_bind[_mask]() is called on it. This provides a
> sanity check verifying that the task is not randomly blocked later at some
> point within its function handler, in which case it could be concurrently
> awakened, leaving the call to do_set_cpus_allowed() without any effect until
> the next voluntary sleep.
> 
> Rely on the wake-up ordering to ensure that a read of the newly introduced
> "started" field returns the expected value:
> 
>     TASK A                                   TASK B
>     ------                                   ------
>     READ kthread->started
>     wake_up_process(B)
>        rq_lock()
>        ...
>        rq_unlock() // RELEASE
>                                              schedule()
>                                                 rq_lock() // ACQUIRE
>                                                 // schedule task B
>                                                 rq_unlock()
>                                                 WRITE kthread->started
> 
> Similarly, a write to kthread->started performed before any subsequent
> voluntary sleep is guaranteed to be visible after wait_task_inactive()
> returns in __kthread_bind_mask(), allowing potential misuse of the API to
> be reported.
> 
> Upcoming patches will make further use of this facility.
> 
> Signed-off-by: Frederic Weisbecker <frederic@kernel.org>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

Patch

diff --git a/kernel/kthread.c b/kernel/kthread.c
index f7be976ff88a..ecb719f54f7a 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -53,6 +53,7 @@ struct kthread_create_info
 struct kthread {
 	unsigned long flags;
 	unsigned int cpu;
+	int started;
 	int result;
 	int (*threadfn)(void *);
 	void *data;
@@ -382,6 +383,8 @@ static int kthread(void *_create)
 	schedule_preempt_disabled();
 	preempt_enable();
 
+	self->started = 1;
+
 	ret = -EINTR;
 	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
 		cgroup_kthread_ready();
@@ -540,7 +543,9 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int
 
 void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
 {
+	struct kthread *kthread = to_kthread(p);
 	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
+	WARN_ON_ONCE(kthread->started);
 }
 
 /**
@@ -554,7 +559,9 @@ void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
  */
 void kthread_bind(struct task_struct *p, unsigned int cpu)
 {
+	struct kthread *kthread = to_kthread(p);
 	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
+	WARN_ON_ONCE(kthread->started);
 }
 EXPORT_SYMBOL(kthread_bind);