
[RFC,v3,03/10] task_work, sched: Add a _locked variant to task_work_cancel()

Message ID 20240711130004.2157737-4-vschneid@redhat.com (mailing list archive)
State New, archived
Series sched/fair: Defer CFS throttle to user entry

Commit Message

Valentin Schneider July 11, 2024, 12:59 p.m. UTC
Later commits will need to issue a task_work_cancel() from within the
scheduler with the task's ->pi_lock held.

Add a _locked variant that expects p->pi_lock to be held. Expose it in a
separate scheduler header file, as this really is a scheduler-only
interface.

Signed-off-by: Valentin Schneider <vschneid@redhat.com>
---
 kernel/sched/task_work_sched.h | 14 +++++++
 kernel/task_work.c             | 67 ++++++++++++++++++++++++++--------
 2 files changed, 66 insertions(+), 15 deletions(-)
 create mode 100644 kernel/sched/task_work_sched.h
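
For context, the call pattern this series is building towards looks roughly like the sketch below. The function name and body are hypothetical illustrations; only task_work_cancel_locked() and its pi_lock requirement come from this patch:

/* Hypothetical scheduler-side caller; the real user appears later in the series. */
static void sched_cancel_deferred_work(struct task_struct *p,
				       task_work_func_t func)
{
	struct callback_head *head;

	/*
	 * This path already holds p->pi_lock, so the plain
	 * task_work_cancel() -- which takes pi_lock itself --
	 * would deadlock on the raw spinlock.
	 */
	lockdep_assert_held(&p->pi_lock);

	/*
	 * A non-NULL return means the pending work was dequeued
	 * before it could run; NULL means no queued work matched.
	 */
	head = task_work_cancel_locked(p, func);
	if (head)
		trace_printk("cancelled deferred work %pS\n", head->func);
}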

Comments

Oleg Nesterov July 12, 2024, 10:35 a.m. UTC | #1
On 07/11, Valentin Schneider wrote:
>
> Later commits will need to issue a task_work_cancel() from within the
> scheduler with the task's ->pi_lock held.
>
> Add a _locked variant that expects p->pi_lock to be held. Expose it in a
> separate scheduler header file, as this really is a scheduler-only
> interface.
>
> Signed-off-by: Valentin Schneider <vschneid@redhat.com>
> ---
>  kernel/sched/task_work_sched.h | 14 +++++++
>  kernel/task_work.c             | 67 ++++++++++++++++++++++++++--------
>  2 files changed, 66 insertions(+), 15 deletions(-)
>  create mode 100644 kernel/sched/task_work_sched.h

I am not sure the new task_work_sched.h makes sense, but I won't argue.

Reviewed-by: Oleg Nesterov <oleg@redhat.com>

Peter Zijlstra July 12, 2024, 3:20 p.m. UTC | #2
On Thu, Jul 11, 2024 at 02:59:57PM +0200, Valentin Schneider wrote:
> Later commits will need to issue a task_work_cancel() from within the
> scheduler with the task's ->pi_lock held.
> 
> Add a _locked variant that expects p->pi_lock to be held. Expose it in a
> separate scheduler header file, as this really is a scheduler-only
> interface.
> 
> Signed-off-by: Valentin Schneider <vschneid@redhat.com>
> ---
>  kernel/sched/task_work_sched.h | 14 +++++++
>  kernel/task_work.c             | 67 ++++++++++++++++++++++++++--------
>  2 files changed, 66 insertions(+), 15 deletions(-)
>  create mode 100644 kernel/sched/task_work_sched.h
> 
> diff --git a/kernel/sched/task_work_sched.h b/kernel/sched/task_work_sched.h
> new file mode 100644
> index 0000000000000..e235da456427f
> --- /dev/null
> +++ b/kernel/sched/task_work_sched.h
> @@ -0,0 +1,14 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +/*
> + * Scheduler internal task_work methods
> + */
> +#ifndef _KERNEL_TASK_WORK_SCHED_H
> +#define _KERNEL_TASK_WORK_SCHED_H
> +
> +#include <linux/task_work.h>
> +#include <linux/sched.h>
> +
> +struct callback_head *
> +task_work_cancel_locked(struct task_struct *task, task_work_func_t func);
> +
> +#endif


Do we really need that exposed? Can't we squirrel that away in
kernel/sched/sched.h and forget about it?


> @@ -74,33 +76,20 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
>  	return 0;
>  }
>  
> -/**
> - * task_work_cancel_match - cancel a pending work added by task_work_add()
> - * @task: the task which should execute the work
> - * @match: match function to call
> - * @data: data to be passed in to match function
> - *
> - * RETURNS:
> - * The found work or NULL if not found.
> - */
> -struct callback_head *
> -task_work_cancel_match(struct task_struct *task,
> +static struct callback_head *
> +task_work_cancel_match_locked(struct task_struct *task,
>  		       bool (*match)(struct callback_head *, void *data),
>  		       void *data)
>  {
>  	struct callback_head **pprev = &task->task_works;
>  	struct callback_head *work;
> -	unsigned long flags;
>  
> -	if (likely(!task_work_pending(task)))
> -		return NULL;
>  	/*
>  	 * If cmpxchg() fails we continue without updating pprev.
>  	 * Either we raced with task_work_add() which added the
>  	 * new entry before this work, we will find it again. Or
>  	 * we raced with task_work_run(), *pprev == NULL/exited.
>  	 */
> -	raw_spin_lock_irqsave(&task->pi_lock, flags);
>  	work = READ_ONCE(*pprev);
>  	while (work) {
>  		if (!match(work, data)) {
> @@ -109,6 +98,32 @@ task_work_cancel_match(struct task_struct *task,
>  		} else if (try_cmpxchg(pprev, &work, work->next))
>  			break;
>  	}
> +
> +	return work;
> +}

> @@ -136,6 +151,28 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
>  	return task_work_cancel_match(task, task_work_func_match, func);
>  }
>  
> +/**
> + * task_work_cancel_locked - cancel a pending work added by task_work_add()
> + * @task: the task which should execute the work
> + * @func: identifies the work to remove
> + *
> + * Find the last queued pending work with ->func == @func and remove
> + * it from queue.
> + *
> + * RETURNS:
> + * The found work or NULL if not found.
> + */
> +struct callback_head *
> +task_work_cancel_locked(struct task_struct *task, task_work_func_t func)
> +{
> +	lockdep_assert_held(&task->pi_lock);

I'm thinking that lockdep_assert wants to live in your _locked function
above.

> +	if (likely(!task_work_pending(task)))
> +		return NULL;
> +
> +	return task_work_cancel_match_locked(task, task_work_func_match, func);
> +}
> +
>  /**
>   * task_work_run - execute the works added by task_work_add()
>   *
> -- 
> 2.43.0
>

Valentin Schneider July 15, 2024, 5:57 p.m. UTC | #3
On 12/07/24 17:20, Peter Zijlstra wrote:
> On Thu, Jul 11, 2024 at 02:59:57PM +0200, Valentin Schneider wrote:
>> Later commits will need to issue a task_work_cancel() from within the
>> scheduler with the task's ->pi_lock held.
>> 
>> Add a _locked variant that expects p->pi_lock to be held. Expose it in a
>> separate scheduler header file, as this really is a scheduler-only
>> interface.
>> 
>> Signed-off-by: Valentin Schneider <vschneid@redhat.com>
>> ---
>>  kernel/sched/task_work_sched.h | 14 +++++++
>>  kernel/task_work.c             | 67 ++++++++++++++++++++++++++--------
>>  2 files changed, 66 insertions(+), 15 deletions(-)
>>  create mode 100644 kernel/sched/task_work_sched.h
>> 
>> diff --git a/kernel/sched/task_work_sched.h b/kernel/sched/task_work_sched.h
>> new file mode 100644
>> index 0000000000000..e235da456427f
>> --- /dev/null
>> +++ b/kernel/sched/task_work_sched.h
>> @@ -0,0 +1,14 @@
>> +/* SPDX-License-Identifier: GPL-2.0 */
>> +/*
>> + * Scheduler internal task_work methods
>> + */
>> +#ifndef _KERNEL_TASK_WORK_SCHED_H
>> +#define _KERNEL_TASK_WORK_SCHED_H
>> +
>> +#include <linux/task_work.h>
>> +#include <linux/sched.h>
>> +
>> +struct callback_head *
>> +task_work_cancel_locked(struct task_struct *task, task_work_func_t func);
>> +
>> +#endif
>
>
> Do we really need that exposed? Can't we squirrel that away in
> kernel/sched/sched.h and forget about it?
>

Nah, that's not required; I thought a clean-cut header would be neater, but
given its single user, tossing it into sched.h looks better.

>> +struct callback_head *
>> +task_work_cancel_locked(struct task_struct *task, task_work_func_t func)
>> +{
>> +	lockdep_assert_held(&task->pi_lock);
>
> I'm thinking that lockdep_assert wants to live in your _locked function
> above.
>

Quite so!
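
For reference, the two agreed adjustments -- a plain declaration in kernel/sched/sched.h instead of the dedicated header, and the lockdep assertion hoisted into the common helper -- would look roughly as follows. This is an illustrative sketch, not the actual follow-up diff:

/* kernel/sched/sched.h -- plain declaration, no dedicated header: */
struct callback_head *
task_work_cancel_locked(struct task_struct *task, task_work_func_t func);

/* kernel/task_work.c -- assertion moved into the common helper: */
static struct callback_head *
task_work_cancel_match_locked(struct task_struct *task,
			      bool (*match)(struct callback_head *, void *data),
			      void *data)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;

	/* Covers every caller, locked wrappers and unlocked alike. */
	lockdep_assert_held(&task->pi_lock);

	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work, we will find it again. Or
	 * we raced with task_work_run(), *pprev == NULL/exited.
	 */
	work = READ_ONCE(*pprev);
	while (work) {
		if (!match(work, data)) {
			pprev = &work->next;
			work = READ_ONCE(*pprev);
		} else if (try_cmpxchg(pprev, &work, work->next))
			break;
	}

	return work;
}

struct callback_head *
task_work_cancel_locked(struct task_struct *task, task_work_func_t func)
{
	/* lockdep_assert_held() now lives in the helper above. */
	if (likely(!task_work_pending(task)))
		return NULL;

	return task_work_cancel_match_locked(task, task_work_func_match, func);
}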

Patch

diff --git a/kernel/sched/task_work_sched.h b/kernel/sched/task_work_sched.h
new file mode 100644
index 0000000000000..e235da456427f
--- /dev/null
+++ b/kernel/sched/task_work_sched.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Scheduler internal task_work methods
+ */
+#ifndef _KERNEL_TASK_WORK_SCHED_H
+#define _KERNEL_TASK_WORK_SCHED_H
+
+#include <linux/task_work.h>
+#include <linux/sched.h>
+
+struct callback_head *
+task_work_cancel_locked(struct task_struct *task, task_work_func_t func);
+
+#endif
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 95a7e1b7f1dab..81092bc2e7371 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -3,6 +3,8 @@
 #include <linux/task_work.h>
 #include <linux/resume_user_mode.h>
 
+#include "sched/task_work_sched.h"
+
 static struct callback_head work_exited; /* all we need is ->next == NULL */
 
 /**
@@ -74,33 +76,20 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
 	return 0;
 }
 
-/**
- * task_work_cancel_match - cancel a pending work added by task_work_add()
- * @task: the task which should execute the work
- * @match: match function to call
- * @data: data to be passed in to match function
- *
- * RETURNS:
- * The found work or NULL if not found.
- */
-struct callback_head *
-task_work_cancel_match(struct task_struct *task,
+static struct callback_head *
+task_work_cancel_match_locked(struct task_struct *task,
 		       bool (*match)(struct callback_head *, void *data),
 		       void *data)
 {
 	struct callback_head **pprev = &task->task_works;
 	struct callback_head *work;
-	unsigned long flags;
 
-	if (likely(!task_work_pending(task)))
-		return NULL;
 	/*
 	 * If cmpxchg() fails we continue without updating pprev.
 	 * Either we raced with task_work_add() which added the
 	 * new entry before this work, we will find it again. Or
 	 * we raced with task_work_run(), *pprev == NULL/exited.
 	 */
-	raw_spin_lock_irqsave(&task->pi_lock, flags);
 	work = READ_ONCE(*pprev);
 	while (work) {
 		if (!match(work, data)) {
@@ -109,6 +98,32 @@ task_work_cancel_match(struct task_struct *task,
 		} else if (try_cmpxchg(pprev, &work, work->next))
 			break;
 	}
+
+	return work;
+}
+
+/**
+ * task_work_cancel_match - cancel a pending work added by task_work_add()
+ * @task: the task which should execute the work
+ * @match: match function to call
+ * @data: data to be passed in to match function
+ *
+ * RETURNS:
+ * The found work or NULL if not found.
+ */
+struct callback_head *
+task_work_cancel_match(struct task_struct *task,
+		       bool (*match)(struct callback_head *, void *data),
+		       void *data)
+{
+	unsigned long flags;
+	struct callback_head *work;
+
+	if (likely(!task_work_pending(task)))
+		return NULL;
+
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	work = task_work_cancel_match_locked(task, match, data);
 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
 	return work;
@@ -136,6 +151,28 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
 	return task_work_cancel_match(task, task_work_func_match, func);
 }
 
+/**
+ * task_work_cancel_locked - cancel a pending work added by task_work_add()
+ * @task: the task which should execute the work
+ * @func: identifies the work to remove
+ *
+ * Find the last queued pending work with ->func == @func and remove
+ * it from queue.
+ *
+ * RETURNS:
+ * The found work or NULL if not found.
+ */
+struct callback_head *
+task_work_cancel_locked(struct task_struct *task, task_work_func_t func)
+{
+	lockdep_assert_held(&task->pi_lock);
+
+	if (likely(!task_work_pending(task)))
+		return NULL;
+
+	return task_work_cancel_match_locked(task, task_work_func_match, func);
+}
+
 /**
  * task_work_run - execute the works added by task_work_add()
  *