diff mbox series

[RFC,v3,05/10] sched/fair: Introduce an irq_work for cancelling throttle task_work

Message ID 20240711130004.2157737-6-vschneid@redhat.com (mailing list archive)
State New, archived
Headers show
Series sched/fair: Defer CFS throttle to user entry | expand

Commit Message

Valentin Schneider July 11, 2024, 12:59 p.m. UTC
Moving towards per-task throttling, the soon-to-be task_work used for the
actual throttling will need to be cancelled when a task is moving out of a
throttled cfs_rq and into a non-throttled cfs_rq (or out of CFS
altogether).

Such code paths will have at least the rq lock held, and sometimes both the
rq and p->pi_lock locks. Functions such as migrate_task_rq_fair() have no
guarantee as to which of the two is held; as such, the cancellation will
need to happen in a separate context.

The work will be punted to irq_work context; the groundwork is added here, and
the irq_work callback itself will be implemented when switching to per-task
throttling.

Signed-off-by: Valentin Schneider <vschneid@redhat.com>
---
 include/linux/sched.h | 4 ++++
 kernel/sched/fair.c   | 6 ++++++
 2 files changed, 10 insertions(+)
diff mbox series

Patch

diff --git a/include/linux/sched.h b/include/linux/sched.h
index a4976eb5065fc..99a1e77d769db 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -46,6 +46,7 @@ 
 #include <linux/livepatch_sched.h>
 #include <linux/uidgid_types.h>
+#include <linux/irq_work_types.h>
 #include <asm/kmap_size.h>
 
 /* task_struct member predeclarations (sorted alphabetically): */
 struct audit_context;
@@ -813,6 +814,9 @@  struct task_struct {
 #ifdef CONFIG_CGROUP_SCHED
 	struct task_group		*sched_task_group;
 	struct callback_head            sched_throttle_work;
+#ifdef CONFIG_CFS_BANDWIDTH
+	struct irq_work                 unthrottle_irq_work;
+#endif
 #endif
 
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 775547cdd3ce0..095357bd17f0e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5764,11 +5764,17 @@  static void throttle_cfs_rq_work(struct callback_head *work)
 
 }
 
+static void task_throttle_cancel_irq_work_fn(struct irq_work *work)
+{
+	/* Write me */
+}
+
 void init_cfs_throttle_work(struct task_struct *p)
 {
 	/* Protect against double add, see throttle_cfs_rq() and throttle_cfs_rq_work() */
 	p->sched_throttle_work.next = &p->sched_throttle_work;
 	init_task_work(&p->sched_throttle_work, throttle_cfs_rq_work);
+	p->unthrottle_irq_work = IRQ_WORK_INIT_HARD(task_throttle_cancel_irq_work_fn);
 }
 
 static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)