@@ -5776,6 +5776,17 @@ static void task_throttle_cancel_irq_work_fn(struct irq_work *work)
/* Write me */
}
+static void task_woken_fair(struct rq *rq, struct task_struct *p)
+{
+	if (!cfs_bandwidth_used())
+		return;
+
+	if (task_needs_throttling(p))
+		task_throttle_setup(p);
+	else
+		task_throttle_cancel(p);
+}
+
void init_cfs_throttle_work(struct task_struct *p)
{
/* Protect against double add, see throttle_cfs_rq() and throttle_cfs_rq_work() */
@@ -13288,6 +13299,10 @@ DEFINE_SCHED_CLASS(fair) = {
.task_change_group = task_change_group_fair,
#endif
+#ifdef CONFIG_CFS_BANDWIDTH
+	.task_woken		= task_woken_fair,
+#endif
+
#ifdef CONFIG_SCHED_CORE
.task_is_throttled = task_is_throttled_fair,
#endif
Later commits will change CFS bandwidth control throttling from a per-cfs_rq
basis to a per-task basis. This means special care needs to be taken around
any transition a task can have into and out of a cfs_rq. To ease reviewing,
the transitions are patched with dummy helpers that are implemented later on.

Add a class->task_woken callback to handle tasks being woken into potentially
throttled cfs_rq's. Conversely, a task flagged for throttle-at-kernel-exit may
block and need to have its pending throttle removed if runtime was replenished
by the time it got woken up.

Signed-off-by: Valentin Schneider <vschneid@redhat.com>
---
 kernel/sched/fair.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)
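
For readers following the series, here is a rough, hypothetical sketch of what
the dummy helpers referenced above might look like at this point (the real
implementations arrive in later commits). The names task_needs_throttling(),
task_throttle_setup() and task_throttle_cancel() are taken from the hunk above;
the stub bodies below are illustrative assumptions only, not part of this patch:

	/* Placeholder until the per-task throttling machinery is introduced. */
	static bool task_needs_throttling(struct task_struct *p)
	{
		/* Later: report whether p's cfs_rq has exhausted its runtime. */
		return false;
	}

	static void task_throttle_setup(struct task_struct *p)
	{
		/* Later: flag p for throttling at its next kernel exit. */
	}

	static void task_throttle_cancel(struct task_struct *p)
	{
		/* Later: clear a pending throttle, e.g. after runtime was replenished. */
	}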