--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -46,6 +46,7 @@
#include <linux/livepatch_sched.h>
#include <linux/uidgid_types.h>
#include <asm/kmap_size.h>
+#include <linux/irq_work_types.h>

/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
@@ -813,6 +814,9 @@ struct task_struct {
#ifdef CONFIG_CGROUP_SCHED
	struct task_group		*sched_task_group;
	struct callback_head		sched_throttle_work;
+#ifdef CONFIG_CFS_BANDWIDTH
+	struct irq_work			unthrottle_irq_work;
+#endif
#endif
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5764,11 +5764,17 @@ static void throttle_cfs_rq_work(struct callback_head *work)
}

+static void task_throttle_cancel_irq_work_fn(struct irq_work *work)
+{
+	/* Write me */
+}
+
void init_cfs_throttle_work(struct task_struct *p)
{
	/* Protect against double add, see throttle_cfs_rq() and throttle_cfs_rq_work() */
	p->sched_throttle_work.next = &p->sched_throttle_work;
	init_task_work(&p->sched_throttle_work, throttle_cfs_rq_work);
+	p->unthrottle_irq_work = IRQ_WORK_INIT_HARD(task_throttle_cancel_irq_work_fn);
}

static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
Moving towards per-task throttling, the soon-to-be task_work used for
the actual throttling will need to be cancelled when a task moves out
of a throttled cfs_rq and into a non-throttled cfs_rq (or out of CFS
altogether).

Such code paths hold at least the rq lock, and sometimes both the rq
lock and p->pi_lock. Functions such as migrate_task_rq_fair() offer no
guarantee as to which of the two is held, so the cancellation must
happen in a separate context: punt it to irq_work. This patch adds the
groundwork; the irq_work callback itself will be implemented when
switching to per-task throttling.

Signed-off-by: Valentin Schneider <vschneid@redhat.com>
---
 include/linux/sched.h | 4 ++++
 kernel/sched/fair.c   | 6 ++++++
 2 files changed, 10 insertions(+)
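
For readers following along, here is a minimal sketch of what the punted
cancellation could look like once the callback is filled in. This is
illustrative only, not the series' actual implementation: the callback
body and the task_throttle_punt_cancel() helper are assumptions, and the
sketch presumes the bool-returning task_work_cancel(task, cb) API of
recent kernels.

#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/task_work.h>

/* Sketch only -- this patch intentionally leaves the callback empty. */
static void task_throttle_cancel_irq_work_fn(struct irq_work *work)
{
	struct task_struct *p =
		container_of(work, struct task_struct, unthrottle_irq_work);

	/*
	 * By the time this runs, the rq / p->pi_lock critical section
	 * that queued the irq_work has been exited, so cancelling the
	 * task_work (which itself takes p->pi_lock) is safe here.
	 */
	if (task_work_cancel(p, &p->sched_throttle_work))
		/* Restore the "not queued" sentinel from init_cfs_throttle_work() */
		p->sched_throttle_work.next = &p->sched_throttle_work;
}

/*
 * Hypothetical queue site: a path such as migrate_task_rq_fair() that
 * may hold the rq lock, p->pi_lock, or both, and so cannot cancel the
 * task_work inline.
 */
static void task_throttle_punt_cancel(struct task_struct *p)
{
	/* next pointing at itself means "no throttle work queued" */
	if (p->sched_throttle_work.next != &p->sched_throttle_work)
		irq_work_queue(&p->unthrottle_irq_work);
}

The double-add sentinel set up in init_cfs_throttle_work() would do double
duty here: because sched_throttle_work.next points at itself whenever no
task_work is queued, the queue site can cheaply test for pending throttle
work and only raise the irq_work when there is something to cancel.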