From patchwork Sat Jan 28 00:16:10 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119556 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 2026BC54EAA for ; Sat, 28 Jan 2023 00:17:01 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S230036AbjA1AQ7 (ORCPT ); Fri, 27 Jan 2023 19:16:59 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:35404 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S229681AbjA1AQ6 (ORCPT ); Fri, 27 Jan 2023 19:16:58 -0500 Received: from mail-pj1-x1029.google.com (mail-pj1-x1029.google.com [IPv6:2607:f8b0:4864:20::1029]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 0F2507B7A4; Fri, 27 Jan 2023 16:16:55 -0800 (PST) Received: by mail-pj1-x1029.google.com with SMTP id m11so6171348pji.0; Fri, 27 Jan 2023 16:16:55 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=NRDQhCBgwfQ8yY1Xopw2z/Bq7Rk/zYbqVVbUajPCRmc=; b=HEyk5w2j6vHmfk6U3e54rbxPo0XdVCGax0gdOXrMmcBFEr7jOeghdI4JZjsBD/fn/9 0vLWDAJL8v2JVkkB/4Gwdh2xQFq72LArOXYolPZY4vATmx+unpg26DDP0pI2UZKvq4pZ KcsQehsz96z9mgjyGi9eUPy7GjNu/l+5JEx236MuzTUw4CRLKRx2HacMvBN5wF3X+nTo KIdwQz87pEzvgZmWNpHwzUYcGK4h2kjUPtIlgBHMLMjJ17veWIk7BM15Bfq/v2ARrtyt XARx7wl/l80SY1t6sXp95juEPQIMr+n+EA7WWUl4pK6Q2G9kCZeUwXfW7AOzA83JU++l 1+Mg== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=NRDQhCBgwfQ8yY1Xopw2z/Bq7Rk/zYbqVVbUajPCRmc=; b=p72bLxDtuqnkn+zAMEfDuof39IikDSmHeK0vmWaD1dESOwF5Jq8MfwuhsGyDIO2nKk l9PG/iCw11WfQ15QeAVA2/anCz/er+XyL3kVq85QB1h/rpkhK5BvMAWHUNMtgvCYyedH XXhOOKTkplz1i3QmhgZRQm1VUtc67XaDDSncKGTIuVX9IIjECUz1MkxLQYVtIMYKxU7l t50sIuT/gpsgO2YNWU3ezWq2cVuaNxkNNBMjMU830H1ohgvEOGaL52koVQ54jDmA0Gj5 hoKcPi/vF8Acynt9UBEtOnfa6iprwCQoBUZcYyMFShVe8aPntcjN5mI/fhdXe333/e/d PV8w== X-Gm-Message-State: AFqh2kqyloJoWCxKZhWV1YXAgRwhpzRi+/LphvmUqP+VYcINpl17/9fl RGyQAC/mZz3q+6hKd7xMh0c= X-Google-Smtp-Source: AMrXdXsIKi0/c3/RChakxDcLuJFeuklhKik4NKkq0YUhv58lgWGV9adQo0XALxAkUUa3eQrkcQDQGQ== X-Received: by 2002:a17:90b:3b45:b0:22b:b6c5:fa7e with SMTP id ot5-20020a17090b3b4500b0022bb6c5fa7emr29048413pjb.35.1674865014368; Fri, 27 Jan 2023 16:16:54 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. 
[2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id k9-20020a17090a3cc900b0022bee492a5dsm5503602pjd.23.2023.01.27.16.16.53 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:16:54 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 01/30] cgroup: Implement cgroup_show_cftypes() Date: Fri, 27 Jan 2023 14:16:10 -1000 Message-Id: <20230128001639.3510083-2-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org Implement cgroup_show_cftypes() which shows and hides all cgroup files associated with the specified set of cgroup file types. CFTYPE_HIDDEN flag is added so that files can be created hidden from the get-go. cgroup_show_cftypes() can be used whether the cftypes are added or not. It also combines with cgroup_show_file() so that a given file is visible iff both its cftype and cfile are visible. This will be used by a new sched_class to selectively show and hide CPU controller interface files depending on whether they're supported. Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- include/linux/cgroup-defs.h | 8 +++ include/linux/cgroup.h | 1 + kernel/cgroup/cgroup.c | 97 ++++++++++++++++++++++++++++++++++--- 3 files changed, 99 insertions(+), 7 deletions(-) diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 8a0d5466c7be..8af1e7d487cb 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -127,12 +127,18 @@ enum { CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */ CFTYPE_DEBUG = (1 << 5), /* create when cgroup_debug */ + CFTYPE_HIDDEN = (1 << 6), /* file type hidden, see cgroup_show_cftypes() */ + /* internal flags, do not use outside cgroup core proper */ __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */ __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */ __CFTYPE_ADDED = (1 << 18), }; +enum cfile_flags { + CFILE_HIDDEN = (1 << 0), /* file instance hidden */ +}; + /* * cgroup_file is the handle for a file instance created in a cgroup which * is used, for example, to generate file changed notifications. 
This can @@ -140,7 +146,9 @@ enum { */ struct cgroup_file { /* do not access any fields from outside cgroup core */ + struct cftype *cft; struct kernfs_node *kn; + unsigned int flags; unsigned long notified_at; struct timer_list notify_timer; }; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 3410aecffdb4..a8c6982c2c24 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -115,6 +115,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); int cgroup_rm_cftypes(struct cftype *cfts); +void cgroup_show_cftype(struct cftype *cft, bool show); void cgroup_file_notify(struct cgroup_file *cfile); void cgroup_file_show(struct cgroup_file *cfile, bool show); diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index c099cf3fa02d..27bafb2732c5 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -4196,10 +4196,13 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp, return ret; } + kernfs_show(kn, !(cft->flags & CFTYPE_HIDDEN)); + if (cft->file_offset) { struct cgroup_file *cfile = (void *)css + cft->file_offset; timer_setup(&cfile->notify_timer, cgroup_file_notify_timer, 0); + cfile->cft = cft; spin_lock_irq(&cgroup_file_kn_lock); cfile->kn = kn; @@ -4478,6 +4481,24 @@ void cgroup_file_notify(struct cgroup_file *cfile) spin_unlock_irqrestore(&cgroup_file_kn_lock, flags); } +static struct kernfs_node *cfile_kn_get(struct cgroup_file *cfile) +{ + struct kernfs_node *kn; + + spin_lock_irq(&cgroup_file_kn_lock); + kn = cfile->kn; + kernfs_get(kn); + spin_unlock_irq(&cgroup_file_kn_lock); + + return kn; +} + +static bool cfile_visible(struct cgroup_file *cfile) +{ + return !(cfile->cft->flags & CFTYPE_HIDDEN) && + !(cfile->flags & CFILE_HIDDEN); +} + /** * cgroup_file_show - show or hide a hidden cgroup file * @cfile: target cgroup_file obtained by setting cftype->file_offset @@ -4487,15 +4508,20 @@ void cgroup_file_show(struct cgroup_file *cfile, bool show) { struct kernfs_node *kn; - spin_lock_irq(&cgroup_file_kn_lock); - kn = cfile->kn; - kernfs_get(kn); - spin_unlock_irq(&cgroup_file_kn_lock); + mutex_lock(&cgroup_mutex); - if (kn) - kernfs_show(kn, show); + if (show) + cfile->flags &= ~CFILE_HIDDEN; + else + cfile->flags |= CFILE_HIDDEN; - kernfs_put(kn); + kn = cfile_kn_get(cfile); + if (kn) { + kernfs_show(kn, cfile_visible(cfile)); + kernfs_put(kn); + } + + mutex_unlock(&cgroup_mutex); } /** @@ -5509,6 +5535,63 @@ static void offline_css(struct cgroup_subsys_state *css) wake_up_all(&css->cgroup->offline_waitq); } +/** + * cgroup_show_cftype - show or hide a cgroup file type + * @cft: cftype to show or hide + * @show: whether to show or hide + * + * Sets %CFTYPE_HIDDEN and shows/hides the matching files according to @show. + * @cft may or may not be added at the time of this call. After hiding, it's + * guaranteed that there are no in-flight operations on the hidden files. + */ +void cgroup_show_cftype(struct cftype *cft, bool show) +{ + struct cgroup_subsys *ss = cft->ss; + struct cgroup *root = ss ? 
&ss->root->cgrp : &cgrp_dfl_root.cgrp; + struct cgroup_subsys_state *css; + + mutex_lock(&cgroup_mutex); + + if (show) + cft->flags &= ~CFTYPE_HIDDEN; + else + cft->flags |= CFTYPE_HIDDEN; + + if (!(cft->flags & __CFTYPE_ADDED)) + goto out_unlock; + + css_for_each_descendant_pre(css, cgroup_css(root, ss)) { + struct cgroup *cgrp = css->cgroup; + struct kernfs_node *kn; + + if (!(css->flags & CSS_VISIBLE)) + continue; + + if (cft->file_offset) { + struct cgroup_file *cfile = + (void *)css + cft->file_offset; + + kn = cfile_kn_get(cfile); + if (kn) { + kernfs_show(kn, cfile_visible(cfile)); + kernfs_put(kn); + } + } else { + char buf[CGROUP_FILE_NAME_MAX]; + + kn = kernfs_find_and_get(cgrp->kn, + cgroup_file_name(cgrp, cft, buf)); + if (kn) { + kernfs_show(kn, show); + kernfs_put(kn); + } + } + } + +out_unlock: + mutex_unlock(&cgroup_mutex); +} + /** * css_create - create a cgroup_subsys_state * @cgrp: the cgroup new css will be associated with From patchwork Sat Jan 28 00:16:11 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119559 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id C951FC61DA7 for ; Sat, 28 Jan 2023 00:17:01 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S229506AbjA1ARA (ORCPT ); Fri, 27 Jan 2023 19:17:00 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:35406 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S229701AbjA1AQ6 (ORCPT ); Fri, 27 Jan 2023 19:16:58 -0500 Received: from mail-pl1-x62a.google.com (mail-pl1-x62a.google.com [IPv6:2607:f8b0:4864:20::62a]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 0686B7B7AB; Fri, 27 Jan 2023 16:16:57 -0800 (PST) Received: by mail-pl1-x62a.google.com with SMTP id jl3so6559138plb.8; Fri, 27 Jan 2023 16:16:57 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=5HZHa2gfgb8Zdv7r+/ewKzMZze1+sOuN4ROxBuKn10I=; b=oRm5jPJmF1orSuU6GpR3idM02s3W6ElgPvwDSf2lltreMkjLb/3Sgr50VP7R/jdPRN PvN3JBXyxwmGjKaxW/CeVDP3nUcreH6E8YoF4IB3J4G5CcAnIYQE0E3G0vRAwcc3wDFP 9YAQwYy44mVrtJUwrVoeEWkjDqSgr6gE/dFAUhd6ZrstS9AjdpMq0eNxXbORByeOupBV GC1BxvTR0+SJN81Bfdej575HL+ogpDqYdq4b614q0+JxQLOgoGdlLqRs1yGv4CnOAzLN gbZ+SA4W2OIW0IWXedtw0nqcsfc11ViQO5x+2VDVQO6zOUvBRDvTXa/2lJg7ly7DylqF LFug== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=5HZHa2gfgb8Zdv7r+/ewKzMZze1+sOuN4ROxBuKn10I=; b=QItGYIASmyYqW0vgpwmHXgzFN5EMyqjJ2BdZRjEQVrWRUlA757Y7/hscanqwfk7QyJ AyYpgUYuSPrbEhrsbXmgM8d4DcDRR9JJK1BtaLuuRxcI7xDcZ8LxK8Lp6zKYin6cnpoX 1iGFqAMfeKe1JB3rM7FHaqsbCcTVZZH4kAOFW467C0A0WY01sgfZmLuxylch7UAoktg0 RTA2+LSO/n/bFusUWDAnaWpYGg2m2fXOP6+vnfa0SjCeFc1gS67gWBEIWYnTIpfVcPTV KzEqxDy4x6lTY6WJNziDBxlKQag5d3o18g2RnfwvcEM3xcxNZ7ghmE7wWLrCfvq3WaVM +GxQ== X-Gm-Message-State: AFqh2kqb0epwLLvXUJV/njleIl0JmZHOZuc9LzwSpw5ty4IlJ9GM5cM/ aEgyFSp/LU7H8jFxwLQjVis= X-Google-Smtp-Source: 
AMrXdXuyiQZEq7CqpXBJcagQRn6iM9Ne999pZx2vj4xpBgm0SxZ8p9N4d6qwVOG5yeUPbl0KSD9PPg== X-Received: by 2002:a17:90b:4a0c:b0:22a:3c8:9d1 with SMTP id kk12-20020a17090b4a0c00b0022a03c809d1mr31829210pjb.32.1674865016153; Fri, 27 Jan 2023 16:16:56 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id gn17-20020a17090ac79100b0022bfc79c71bsm3247317pjb.50.2023.01.27.16.16.55 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:16:55 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 02/30] sched: Encapsulate task attribute change sequence into a helper macro Date: Fri, 27 Jan 2023 14:16:11 -1000 Message-Id: <20230128001639.3510083-3-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org A task needs to be dequeued and put before an attribute change and then restored afterwards. This is currently open-coded in multiple places. This patch encapsulates the preparation and restoration sequences into SCHED_CHANGE_BLOCK which allows the actual attribute changes to be put inside its nested block. While the conversions are generally straightforward, there are some subtleties: * If a variable is specified for the flags argument, it can be modified from inside the block body to allow using a different flags value for re-enqueueing. This is used by rt_mutex_setprio() and __sched_setscheduler(). * __sched_setscheduler() used to only set ENQUEUE_HEAD if the task is queued. After the conversion, it sets the flag whether the task is queued or not. This doesn't cause any behavioral differences and is simpler than accessing the internal state of the helper. * In a similar vein, sched_move_task() tests task_current() again after the change block instead of carrying over the test result from inside the change block. This patch is adopted from Peter Zijlstra's draft patch linked below. The changes are: * Call fini explicitly from for() instead of using the __cleanup__ attribute. * Allow the queue flag variable to be modified directly so that the user doesn't have to poke into sched_change_guard struct. Also, in the original patch, rt_mutex_setprio() was incorrectly updating its queue_flag instead of cg.flags. * Some cosmetic changes. 
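For illustration, here is a minimal caller-side sketch (not taken from the patch) of what a conversion looks like, including the flags-variable subtlety noted above. The function name example_change_prio() is made up; SCHED_CHANGE_BLOCK(), the DEQUEUE_*/ENQUEUE_* flags and __setscheduler_prio() are the ones used elsewhere in this series.

static void example_change_prio(struct rq *rq, struct task_struct *p, int prio)
{
	int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;

	lockdep_assert_rq_held(rq);
	update_rq_clock(rq);	/* DEQUEUE_NOCLOCK means the caller updates the clock */

	SCHED_CHANGE_BLOCK(rq, p, queue_flags) {
		/*
		 * queue_flags is a variable, so flags added here are seen by
		 * sched_change_guard_fini() when @p is re-enqueued.
		 */
		if (p->prio < prio)
			queue_flags |= ENQUEUE_HEAD;
		__setscheduler_prio(p, prio);
	}
	/* @p has been re-enqueued and/or set_next_task'd again by this point */
}

Because sched_change_guard_fini() runs as the for-loop's increment expression, breaking out of the block early would skip the re-enqueue; none of the conversions below do that.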
Signed-off-by: Tejun Heo Original-patch-by: Peter Zijlstra Link: https://lore.kernel.org/all/20220330162228.GH14330@worktop.programming.kicks-ass.net/T/#u Reviewed-by: David Vernet --- kernel/sched/core.c | 260 ++++++++++++++++++++++---------------------- 1 file changed, 130 insertions(+), 130 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 25b582b6ee5f..bfc3312f305a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2094,6 +2094,76 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags) dequeue_task(rq, p, flags); } +struct sched_change_guard { + struct task_struct *p; + struct rq *rq; + bool queued; + bool running; + bool done; +}; + +static struct sched_change_guard +sched_change_guard_init(struct rq *rq, struct task_struct *p, int flags) +{ + struct sched_change_guard cg = { + .rq = rq, + .p = p, + .queued = task_on_rq_queued(p), + .running = task_current(rq, p), + }; + + if (cg.queued) { + /* + * __kthread_bind() may call this on blocked tasks without + * holding rq->lock through __do_set_cpus_allowed(). Assert @rq + * locked iff @p is queued. + */ + lockdep_assert_rq_held(rq); + dequeue_task(rq, p, flags); + } + if (cg.running) + put_prev_task(rq, p); + + return cg; +} + +static void sched_change_guard_fini(struct sched_change_guard *cg, int flags) +{ + if (cg->queued) + enqueue_task(cg->rq, cg->p, flags | ENQUEUE_NOCLOCK); + if (cg->running) + set_next_task(cg->rq, cg->p); + cg->done = true; +} + +/** + * SCHED_CHANGE_BLOCK - Nested block for task attribute updates + * @__rq: Runqueue the target task belongs to + * @__p: Target task + * @__flags: DEQUEUE/ENQUEUE_* flags + * + * A task may need to be dequeued and put_prev_task'd for attribute updates and + * set_next_task'd and re-enqueued afterwards. This helper defines a nested + * block which automatically handles these preparation and cleanup operations. + * + * SCHED_CHANGE_BLOCK(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK) { + * update_attribute(p); + * ... + * } + * + * If @__flags is a variable, the variable may be updated in the block body and + * the updated value will be used when re-enqueueing @p. + * + * If %DEQUEUE_NOCLOCK is specified, the caller is responsible for calling + * update_rq_clock() beforehand. Otherwise, the rq clock is automatically + * updated iff the task needs to be dequeued and re-enqueued. Only the former + * case guarantees that the rq clock is up-to-date inside and after the block. + */ +#define SCHED_CHANGE_BLOCK(__rq, __p, __flags) \ + for (struct sched_change_guard __cg = \ + sched_change_guard_init(__rq, __p, __flags); \ + !__cg.done; sched_change_guard_fini(&__cg, __flags)) + static inline int __normal_prio(int policy, int rt_prio, int nice) { int prio; @@ -2552,7 +2622,6 @@ static void __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx) { struct rq *rq = task_rq(p); - bool queued, running; /* * This here violates the locking rules for affinity, since we're only @@ -2571,26 +2640,9 @@ __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx) else lockdep_assert_held(&p->pi_lock); - queued = task_on_rq_queued(p); - running = task_current(rq, p); - - if (queued) { - /* - * Because __kthread_bind() calls this on blocked tasks without - * holding rq->lock. 
- */ - lockdep_assert_rq_held(rq); - dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); + SCHED_CHANGE_BLOCK(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK) { + p->sched_class->set_cpus_allowed(p, ctx); } - if (running) - put_prev_task(rq, p); - - p->sched_class->set_cpus_allowed(p, ctx); - - if (queued) - enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); - if (running) - set_next_task(rq, p); } /* @@ -6922,7 +6974,7 @@ static inline int rt_effective_prio(struct task_struct *p, int prio) */ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) { - int prio, oldprio, queued, running, queue_flag = + int prio, oldprio, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; const struct sched_class *prev_class; struct rq_flags rf; @@ -6982,49 +7034,39 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) queue_flag &= ~DEQUEUE_MOVE; prev_class = p->sched_class; - queued = task_on_rq_queued(p); - running = task_current(rq, p); - if (queued) - dequeue_task(rq, p, queue_flag); - if (running) - put_prev_task(rq, p); - - /* - * Boosting condition are: - * 1. -rt task is running and holds mutex A - * --> -dl task blocks on mutex A - * - * 2. -dl task is running and holds mutex A - * --> -dl task blocks on mutex A and could preempt the - * running task - */ - if (dl_prio(prio)) { - if (!dl_prio(p->normal_prio) || - (pi_task && dl_prio(pi_task->prio) && - dl_entity_preempt(&pi_task->dl, &p->dl))) { - p->dl.pi_se = pi_task->dl.pi_se; - queue_flag |= ENQUEUE_REPLENISH; + SCHED_CHANGE_BLOCK(rq, p, queue_flag) { + /* + * Boosting condition are: + * 1. -rt task is running and holds mutex A + * --> -dl task blocks on mutex A + * + * 2. -dl task is running and holds mutex A + * --> -dl task blocks on mutex A and could preempt the + * running task + */ + if (dl_prio(prio)) { + if (!dl_prio(p->normal_prio) || + (pi_task && dl_prio(pi_task->prio) && + dl_entity_preempt(&pi_task->dl, &p->dl))) { + p->dl.pi_se = pi_task->dl.pi_se; + queue_flag |= ENQUEUE_REPLENISH; + } else { + p->dl.pi_se = &p->dl; + } + } else if (rt_prio(prio)) { + if (dl_prio(oldprio)) + p->dl.pi_se = &p->dl; + if (oldprio < prio) + queue_flag |= ENQUEUE_HEAD; } else { - p->dl.pi_se = &p->dl; + if (dl_prio(oldprio)) + p->dl.pi_se = &p->dl; + if (rt_prio(oldprio)) + p->rt.timeout = 0; } - } else if (rt_prio(prio)) { - if (dl_prio(oldprio)) - p->dl.pi_se = &p->dl; - if (oldprio < prio) - queue_flag |= ENQUEUE_HEAD; - } else { - if (dl_prio(oldprio)) - p->dl.pi_se = &p->dl; - if (rt_prio(oldprio)) - p->rt.timeout = 0; - } - - __setscheduler_prio(p, prio); - if (queued) - enqueue_task(rq, p, queue_flag); - if (running) - set_next_task(rq, p); + __setscheduler_prio(p, prio); + } check_class_changed(rq, p, prev_class, oldprio); out_unlock: @@ -7046,7 +7088,6 @@ static inline int rt_effective_prio(struct task_struct *p, int prio) void set_user_nice(struct task_struct *p, long nice) { - bool queued, running; int old_prio; struct rq_flags rf; struct rq *rq; @@ -7070,22 +7111,13 @@ void set_user_nice(struct task_struct *p, long nice) p->static_prio = NICE_TO_PRIO(nice); goto out_unlock; } - queued = task_on_rq_queued(p); - running = task_current(rq, p); - if (queued) - dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); - if (running) - put_prev_task(rq, p); - - p->static_prio = NICE_TO_PRIO(nice); - set_load_weight(p, true); - old_prio = p->prio; - p->prio = effective_prio(p); - if (queued) - enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); - if (running) - set_next_task(rq, p); + 
SCHED_CHANGE_BLOCK(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK) { + p->static_prio = NICE_TO_PRIO(nice); + set_load_weight(p, true); + old_prio = p->prio; + p->prio = effective_prio(p); + } /* * If the task increased its priority or is running and @@ -7469,7 +7501,7 @@ static int __sched_setscheduler(struct task_struct *p, bool user, bool pi) { int oldpolicy = -1, policy = attr->sched_policy; - int retval, oldprio, newprio, queued, running; + int retval, oldprio, newprio; const struct sched_class *prev_class; struct balance_callback *head; struct rq_flags rf; @@ -7634,33 +7666,22 @@ static int __sched_setscheduler(struct task_struct *p, queue_flags &= ~DEQUEUE_MOVE; } - queued = task_on_rq_queued(p); - running = task_current(rq, p); - if (queued) - dequeue_task(rq, p, queue_flags); - if (running) - put_prev_task(rq, p); - - prev_class = p->sched_class; + SCHED_CHANGE_BLOCK(rq, p, queue_flags) { + prev_class = p->sched_class; - if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) { - __setscheduler_params(p, attr); - __setscheduler_prio(p, newprio); - } - __setscheduler_uclamp(p, attr); + if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) { + __setscheduler_params(p, attr); + __setscheduler_prio(p, newprio); + } + __setscheduler_uclamp(p, attr); - if (queued) { /* * We enqueue to tail when the priority of a task is * increased (user space view). */ if (oldprio < p->prio) queue_flags |= ENQUEUE_HEAD; - - enqueue_task(rq, p, queue_flags); } - if (running) - set_next_task(rq, p); check_class_changed(rq, p, prev_class, oldprio); @@ -9177,25 +9198,15 @@ int migrate_task_to(struct task_struct *p, int target_cpu) */ void sched_setnuma(struct task_struct *p, int nid) { - bool queued, running; struct rq_flags rf; struct rq *rq; rq = task_rq_lock(p, &rf); - queued = task_on_rq_queued(p); - running = task_current(rq, p); - if (queued) - dequeue_task(rq, p, DEQUEUE_SAVE); - if (running) - put_prev_task(rq, p); - - p->numa_preferred_nid = nid; + SCHED_CHANGE_BLOCK(rq, p, DEQUEUE_SAVE) { + p->numa_preferred_nid = nid; + } - if (queued) - enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); - if (running) - set_next_task(rq, p); task_rq_unlock(rq, p, &rf); } #endif /* CONFIG_NUMA_BALANCING */ @@ -10287,35 +10298,24 @@ static void sched_change_group(struct task_struct *tsk) */ void sched_move_task(struct task_struct *tsk) { - int queued, running, queue_flags = - DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; struct rq_flags rf; struct rq *rq; rq = task_rq_lock(tsk, &rf); update_rq_clock(rq); - running = task_current(rq, tsk); - queued = task_on_rq_queued(tsk); - - if (queued) - dequeue_task(rq, tsk, queue_flags); - if (running) - put_prev_task(rq, tsk); - - sched_change_group(tsk); + SCHED_CHANGE_BLOCK(rq, tsk, + DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK) { + sched_change_group(tsk); + } - if (queued) - enqueue_task(rq, tsk, queue_flags); - if (running) { - set_next_task(rq, tsk); - /* - * After changing group, the running task may have joined a - * throttled one but it's still the running task. Trigger a - * resched to make sure that task can still run. - */ + /* + * After changing group, the running task may have joined a throttled + * one but it's still the running task. Trigger a resched to make sure + * that task can still run. 
+ */ + if (task_current(rq, tsk)) resched_curr(rq); - } task_rq_unlock(rq, tsk, &rf); } From patchwork Sat Jan 28 00:16:12 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119557 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id BCFDAC636BD for ; Sat, 28 Jan 2023 00:17:02 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S230230AbjA1ARA (ORCPT ); Fri, 27 Jan 2023 19:17:00 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:35412 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S229797AbjA1AQ7 (ORCPT ); Fri, 27 Jan 2023 19:16:59 -0500 Received: from mail-pj1-x102c.google.com (mail-pj1-x102c.google.com [IPv6:2607:f8b0:4864:20::102c]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id A6F977B40C; Fri, 27 Jan 2023 16:16:58 -0800 (PST) Received: by mail-pj1-x102c.google.com with SMTP id j5so6123002pjn.5; Fri, 27 Jan 2023 16:16:58 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=Rv/Uvu6EIXemVLIIVSqALTZG9BUUBydUSWzFLjEsx9Q=; b=LKnAW9kbk6fSzGjKiTmZQwsTmnSrS+oZ9BtMVJkwkeOpm9dgSgoKqp8mDg7qin9Owz iwNZ/3tU2uAJfzTsBa9p97wcSufjZ6NeizVN/nRD4LsRCyX6592NaWhKMUHZzFQ2g8AU NMEEf08EBE3300salRJMVuYj73w27QqZySqFazJKi3/4ekFmx8hUv0hgRYOQ8HXi9DvI w2aWyGkWqqv8Bazn89LJQnZsv8Rv2cl2lW1FMmL6OWO1SS/h9Jmr9I4BXIOoSU+47MY4 g7gJb9UQIMRf4syiLmlC8Xeg+H8YTyf4w6XM9ruwmOs+CQ4Am/+JuRpygMaQQhhAHIt3 GsTg== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=Rv/Uvu6EIXemVLIIVSqALTZG9BUUBydUSWzFLjEsx9Q=; b=ZFJRna3KHbswrl1Yw32voTkCtBT6/5jvv8U5ShwnQYGohwJtN03eNhGz0k7W83wxkh ilnLfOiSk+sY6vrjenDUnx2ZTWXaGjMwE69LV29cP2O1D/dIxkaZE2L4mEi6BB6iOeJj eGw/drcISIXbCzG+oHqdgp9aVdTO6kWiM1rUgD/g++L99mIXI6DgSyrgVvViMciuesQY 1zVaxSFucYlaD5uzlqiKaAiZ1KZ5cqi4cam+1/SUxgemh4iSr2hjMOan95wMHjRWbU/5 8KB02DAqqLBVPmwgwl2zAbUUP6MFNi+VsHtabQORT0xKuAPV1urak5utiOa6ClZ2YGLy zcrQ== X-Gm-Message-State: AFqh2kopRg419zEkmrl3/TCz3NC0F78hueN0JP79+W7642Iqaf1+5iij G+ITm4k7iAKszkXo90YFCnU= X-Google-Smtp-Source: AMrXdXuDbEZYk/r9/lUgsj4m90FAYld3xbjORnaamrPrJgOOCV49qdsiTYysJnM5Msc/44bwjcJh2A== X-Received: by 2002:a17:902:9b97:b0:194:a1f6:65ae with SMTP id y23-20020a1709029b9700b00194a1f665aemr37365357plp.12.1674865017926; Fri, 27 Jan 2023 16:16:57 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. 
[2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id b7-20020a170902d50700b0017f74cab9eesm3428123plg.128.2023.01.27.16.16.57 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:16:57 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 03/30] sched: Restructure sched_class order sanity checks in sched_init() Date: Fri, 27 Jan 2023 14:16:12 -1000 Message-Id: <20230128001639.3510083-4-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org Currently, sched_init() checks that the sched_class'es are in the expected order by testing each adjacency which is a bit brittle and makes it cumbersome to add optional sched_class'es. Instead, let's verify whether they're in the expected order using sched_class_above() which is what matters. Signed-off-by: Tejun Heo Suggested-by: Peter Zijlstra Reviewed-by: David Vernet --- kernel/sched/core.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index bfc3312f305a..65f21f8bf738 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -9721,12 +9721,12 @@ void __init sched_init(void) int i; /* Make sure the linker didn't screw up */ - BUG_ON(&idle_sched_class != &fair_sched_class + 1 || - &fair_sched_class != &rt_sched_class + 1 || - &rt_sched_class != &dl_sched_class + 1); #ifdef CONFIG_SMP - BUG_ON(&dl_sched_class != &stop_sched_class + 1); + BUG_ON(!sched_class_above(&stop_sched_class, &dl_sched_class)); #endif + BUG_ON(!sched_class_above(&dl_sched_class, &rt_sched_class)); + BUG_ON(!sched_class_above(&rt_sched_class, &fair_sched_class)); + BUG_ON(!sched_class_above(&fair_sched_class, &idle_sched_class)); wait_bit_init(); From patchwork Sat Jan 28 00:16:13 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119558 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id CD43BC54EAA for ; Sat, 28 Jan 2023 00:17:03 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231579AbjA1ARC (ORCPT ); Fri, 27 Jan 2023 19:17:02 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:35446 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S231218AbjA1ARB (ORCPT ); Fri, 27 Jan 2023 19:17:01 -0500 Received: from mail-pj1-x1036.google.com (mail-pj1-x1036.google.com [IPv6:2607:f8b0:4864:20::1036]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 169717BE71; Fri, 27 Jan 2023 16:17:00 -0800 (PST) Received: by mail-pj1-x1036.google.com with 
SMTP id x2-20020a17090a46c200b002295ca9855aso10221399pjg.2; Fri, 27 Jan 2023 16:17:00 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=6m4TN++jZD6FdNJ/Ga+E1HCyQ4pLOMCr/1n+B6pb86U=; b=jKNJ8NuC4o+YDO+K4XVtGLbMUMs3aK2o3rk8+j7goEVvwlzii8eL4Cx4+RVAG3dCDj 2E+sWsu12WNfyZihx45sRlOuDpAhmTyURLv0CmMy6YfEQhNStjMX1utKNv9njpEsgpXt jcj+4L5DaHMf0WbZjPZt8hIO++d9jIsMVWqlhJQgbPZE7w0lRcUizb26dL/DI0o40ys+ 4Jzvrt0h4t3mPKjwQKASoI1t4iVLBWlSev5F1CnvJdhUAdecACy7r7eA9DWrYlkYqvLl jIwwg/wK61qMP1yfM5gT7X2/KWcnltxEsJlMh8nZbmws6t8kELJIsrsZldtpHWN+L0ve 78XQ== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=6m4TN++jZD6FdNJ/Ga+E1HCyQ4pLOMCr/1n+B6pb86U=; b=HTtiDzJGBlIkZOQfCbLnPw88H/bQ7Vk/veDIeddA+d/WCEOq2GJ+MBHgVP3S/JjOeS 9vBjdVxHO9BsogvuNN/0ks+noDeLaJqzDj9ACQafOX+Z7xytdHeFQHxEsdlNSAHPS5bE MVW1yb4fsfiMhddGsa6fyJUIijioaxqaA/H/HvEdIc0oauT3Uka+lNrCmL+AAXEEhN67 FmfunhTRgaZVk3ZPPT0DV8jM73Ol3sgARG7UD172XdIrjcgmSgpwVldNNKnRN2/uCMIX upEdTcT4Os9hdqkFvdK3nmxCQcF5sJ2hHM9UHLk7eyzq8bf15GBeS73eWCi5IBypdl4W tNRg== X-Gm-Message-State: AO0yUKX+Wtj3djgdTaGChAhSSe2wGyFGVHF+AhvDYry/RoI+uy5dur+o CSdXVRFhpY9afMVncjx7qG4= X-Google-Smtp-Source: AK7set/5c9xMYx+ooq/lVp0WQamE+4ra85rjVxMtTFfCAINcH+qxDlh+TVUZidQqfIATcQc3vxTbPg== X-Received: by 2002:a17:90b:38cf:b0:22c:15a4:62a0 with SMTP id nn15-20020a17090b38cf00b0022c15a462a0mr10144927pjb.37.1674865019607; Fri, 27 Jan 2023 16:16:59 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id em6-20020a17090b014600b00217090ece49sm3251225pjb.31.2023.01.27.16.16.58 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:16:59 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 04/30] sched: Allow sched_cgroup_fork() to fail and introduce sched_cancel_fork() Date: Fri, 27 Jan 2023 14:16:13 -1000 Message-Id: <20230128001639.3510083-5-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org A new BPF extensible sched_class will need more control over the forking process. It wants to be able to fail from sched_cgroup_fork() after the new task's sched_task_group is initialized so that the loaded BPF program can prepare the task with its cgroup association is established and reject fork if e.g. allocation fails. 
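As a rough illustration of the intended use, and only an illustration since nothing in this series defines these hooks, such a scheduler could eventually plug into the fork path along these lines (example_fork_hook() and example_cancel_fork_hook() are assumed names):

/* Illustrative future body; this patch keeps sched_cgroup_fork() infallible. */
int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
{
	int ret;

	/* ...existing task_group association under p->pi_lock, as today... */

	ret = example_fork_hook(p);	/* assumed hook; may fail with e.g. -ENOMEM */
	return ret;			/* non-zero aborts copy_process() */
}

void sched_cancel_fork(struct task_struct *p)
{
	example_cancel_fork_hook(p);	/* assumed: undo whatever the fork hook set up */
}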
Allow sched_cgroup_fork() to fail by making it return int instead of void and adding sched_cancel_fork() to undo sched_fork() in the error path. sched_cgroup_fork() doesn't fail yet and this patch shouldn't cause any behavior changes. v2: Patch description updated to detail the expected use. Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- include/linux/sched/task.h | 3 ++- kernel/fork.c | 15 ++++++++++----- kernel/sched/core.c | 8 +++++++- 3 files changed, 19 insertions(+), 7 deletions(-) diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 357e0068497c..dcff721170c3 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -58,7 +58,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev); extern void init_idle(struct task_struct *idle, int cpu); extern int sched_fork(unsigned long clone_flags, struct task_struct *p); -extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs); +extern int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs); +extern void sched_cancel_fork(struct task_struct *p); extern void sched_post_fork(struct task_struct *p); extern void sched_dead(struct task_struct *p); diff --git a/kernel/fork.c b/kernel/fork.c index 9f7fe3541897..184c622b5513 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2239,7 +2239,7 @@ static __latent_entropy struct task_struct *copy_process( retval = perf_event_init_task(p, clone_flags); if (retval) - goto bad_fork_cleanup_policy; + goto bad_fork_sched_cancel_fork; retval = audit_alloc(p); if (retval) goto bad_fork_cleanup_perf; @@ -2380,7 +2380,9 @@ static __latent_entropy struct task_struct *copy_process( * cgroup specific, it unconditionally needs to place the task on a * runqueue. */ - sched_cgroup_fork(p, args); + retval = sched_cgroup_fork(p, args); + if (retval) + goto bad_fork_cancel_cgroup; /* * From this point on we must avoid any synchronous user-space @@ -2426,13 +2428,13 @@ static __latent_entropy struct task_struct *copy_process( /* Don't start children in a dying pid namespace */ if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) { retval = -ENOMEM; - goto bad_fork_cancel_cgroup; + goto bad_fork_core_free; } /* Let kill terminate clone/fork in the middle */ if (fatal_signal_pending(current)) { retval = -EINTR; - goto bad_fork_cancel_cgroup; + goto bad_fork_core_free; } /* No more failure paths after this point. 
*/ @@ -2507,10 +2509,11 @@ static __latent_entropy struct task_struct *copy_process( return p; -bad_fork_cancel_cgroup: +bad_fork_core_free: sched_core_free(p); spin_unlock(¤t->sighand->siglock); write_unlock_irq(&tasklist_lock); +bad_fork_cancel_cgroup: cgroup_cancel_fork(p, args); bad_fork_put_pidfd: if (clone_flags & CLONE_PIDFD) { @@ -2549,6 +2552,8 @@ static __latent_entropy struct task_struct *copy_process( audit_free(p); bad_fork_cleanup_perf: perf_event_free_task(p); +bad_fork_sched_cancel_fork: + sched_cancel_fork(p); bad_fork_cleanup_policy: lockdep_free_task(p); #ifdef CONFIG_NUMA diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 65f21f8bf738..49b3d8ce84ca 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4709,7 +4709,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) return 0; } -void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) +int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) { unsigned long flags; @@ -4736,6 +4736,12 @@ void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) if (p->sched_class->task_fork) p->sched_class->task_fork(p); raw_spin_unlock_irqrestore(&p->pi_lock, flags); + + return 0; +} + +void sched_cancel_fork(struct task_struct *p) +{ } void sched_post_fork(struct task_struct *p) From patchwork Sat Jan 28 00:16:14 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119560 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 824A1C61DB3 for ; Sat, 28 Jan 2023 00:17:15 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S232233AbjA1ARO (ORCPT ); Fri, 27 Jan 2023 19:17:14 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:35532 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S230085AbjA1ARD (ORCPT ); Fri, 27 Jan 2023 19:17:03 -0500 Received: from mail-pl1-x62f.google.com (mail-pl1-x62f.google.com [IPv6:2607:f8b0:4864:20::62f]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 608227D994; Fri, 27 Jan 2023 16:17:02 -0800 (PST) Received: by mail-pl1-x62f.google.com with SMTP id g23so6538810plq.12; Fri, 27 Jan 2023 16:17:02 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=tFJCGyWZiDUG3qGifcKWzgbjT+7GhVFjmLYX9EINejM=; b=oL6aQNP9ZmNgM9+EnsT3Bygs83aNPidgbX/vKqfC9WVA7tE+BsP0EBMFXjvKtcxY7+ CrZ8XbEZQ8Qh7XLQiMATkbcj1PEGWDH0TdNA26OiAaH1C2uhxQ1l5cBKSC0z8Bz8g5ae 0PaGjUzn/YapB5X6+jacBo5QvOqxR0bxcMOwLuGbKM2Vs0I+4XGv4W8twq5Jidv08GqG gmbZPvRbVx/Wa6W7DS19D5TsHcDlhHBs4wlzcEcDbfD1zF+dLJQ1KLQDdnzlTFa+e4N/ /iFeudWGZizIbF5PQCqGEQzxCA6Ttx2l95Ak0yPDu4AW33Hkptmp6IcTUlWinc3EcYP0 bB3A== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=tFJCGyWZiDUG3qGifcKWzgbjT+7GhVFjmLYX9EINejM=; b=HDaO2lqsjfunTYFoV28sjr28UUDz4Jmar+derNmLC5ll8FQ14U/A0ZouzUHd5PPEWL 
r/wKeo82OV4Ic5pyy5Lt6eGA5826z7R5xqS9aRDZIW/WcYCfvkBH5Cu4aIWlhiwoVo9C bi9102iyWnfrEEWcRrtoPfuoRabRcWJHzA3dXBB0SpK1RCgdwTtOlfiZ5JijA3UJjcUc Vl2yK+m2KHdoLWkpg2O6ANAHjnNHGcl0wQTb2qEcYu9Y5I9NqU0TOnr+YgOcnp/Bbpyt kHMHMm70vK5WqVP5jknBhwE58aVX6KszIamaSus3L+Ea/u/as1Xj5MZAM4V3da4f/oIG bTwA== X-Gm-Message-State: AO0yUKXd+GPLng1ixKu5q3x0YkoU6/K8wIWSkYYDFFzfu/fwLGJqqDZ4 MpX+Sd9CPmg6Rk4MBNeHrRc= X-Google-Smtp-Source: AK7set8M6exx7EB8VUhHYDOI0L/aBM+C9Mwb/QU489KTVCkyAT1LOYmGWw3whLCvaRTB65e3zwxDHg== X-Received: by 2002:a17:90b:4d0e:b0:22c:4220:6ff2 with SMTP id mw14-20020a17090b4d0e00b0022c42206ff2mr5549209pjb.18.1674865021628; Fri, 27 Jan 2023 16:17:01 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id x34-20020a17090a6c2500b0022bfcf5d297sm5695710pjj.9.2023.01.27.16.17.00 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:01 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 05/30] sched: Add sched_class->reweight_task() Date: Fri, 27 Jan 2023 14:16:14 -1000 Message-Id: <20230128001639.3510083-6-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org Currently, during a task weight change, sched core directly calls reweight_task() defined in fair.c if @p is on CFS. Let's make it a proper sched_class operation instead. CFS's reweight_task() is renamed to reweight_task_fair() and now called through sched_class. While it turns a direct call into an indirect one, set_load_weight() isn't called from a hot path and this change shouldn't cause any noticeable difference. This will be used to implement reweight_task for a new BPF extensible sched_class so that it can keep its cached task weight up-to-date. This will be used by a new sched_class to track weight changes. 
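As a rough sketch of what an implementation of the new callback could look like (illustrative only; reweight_task_example() and the choice to mirror set_load_weight()'s table lookup are assumptions, not part of this patch):

static void reweight_task_example(struct rq *rq, struct task_struct *p,
				  int newprio)
{
	struct load_weight *load = &p->se.load;

	/* keep the generic load weight in sync, as set_load_weight() does */
	load->weight = scale_load(sched_prio_to_weight[newprio]);
	load->inv_weight = sched_prio_to_wmult[newprio];

	/* a BPF-extensible class could also refresh its own cached weight here */
}

A class would wire this up with .reweight_task = reweight_task_example in its DEFINE_SCHED_CLASS() definition, matching the signature of the new sched_class member added below.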
Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- kernel/sched/core.c | 4 ++-- kernel/sched/fair.c | 3 ++- kernel/sched/sched.h | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 49b3d8ce84ca..ce27ed857975 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1275,8 +1275,8 @@ static void set_load_weight(struct task_struct *p, bool update_load) * SCHED_OTHER tasks have to update their load when changing their * weight */ - if (update_load && p->sched_class == &fair_sched_class) { - reweight_task(p, prio); + if (update_load && p->sched_class->reweight_task) { + p->sched_class->reweight_task(task_rq(p), p, prio); } else { load->weight = scale_load(sched_prio_to_weight[prio]); load->inv_weight = sched_prio_to_wmult[prio]; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index c36aa54ae071..fbedf99ed953 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3342,7 +3342,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, } -void reweight_task(struct task_struct *p, int prio) +static void reweight_task_fair(struct rq *rq, struct task_struct *p, int prio) { struct sched_entity *se = &p->se; struct cfs_rq *cfs_rq = cfs_rq_of(se); @@ -12413,6 +12413,7 @@ DEFINE_SCHED_CLASS(fair) = { .task_tick = task_tick_fair, .task_fork = task_fork_fair, + .reweight_task = reweight_task_fair, .prio_changed = prio_changed_fair, .switched_from = switched_from_fair, .switched_to = switched_to_fair, diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 771f8ddb7053..070603f00470 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2208,6 +2208,8 @@ struct sched_class { */ void (*switched_from)(struct rq *this_rq, struct task_struct *task); void (*switched_to) (struct rq *this_rq, struct task_struct *task); + void (*reweight_task)(struct rq *this_rq, struct task_struct *task, + int newprio); void (*prio_changed) (struct rq *this_rq, struct task_struct *task, int oldprio); @@ -2360,8 +2362,6 @@ extern void init_sched_dl_class(void); extern void init_sched_rt_class(void); extern void init_sched_fair_class(void); -extern void reweight_task(struct task_struct *p, int prio); - extern void resched_curr(struct rq *rq); extern void resched_cpu(int cpu); From patchwork Sat Jan 28 00:16:15 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119561 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 72CA9C61DA7 for ; Sat, 28 Jan 2023 00:17:18 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S232583AbjA1ARQ (ORCPT ); Fri, 27 Jan 2023 19:17:16 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:36188 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S231599AbjA1ARN (ORCPT ); Fri, 27 Jan 2023 19:17:13 -0500 Received: from mail-pj1-x1035.google.com (mail-pj1-x1035.google.com [IPv6:2607:f8b0:4864:20::1035]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 345F27D981; Fri, 27 Jan 2023 16:17:04 -0800 (PST) Received: by mail-pj1-x1035.google.com with SMTP id m7-20020a17090a71c700b0022c0c070f2eso9527127pjs.4; Fri, 27 Jan 2023 16:17:04 -0800 (PST) DKIM-Signature: v=1; 
a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=Vz8WtJpJcNM2R282wYEgyHbfluHq5aIu4Y0bgBQVYB0=; b=o/lUjJX4jwOKAgmQuZRpkxVJZIJy4TAponBdxsIIjYFmGELGxNZDavIPVgC+llz1/O XgZpEr/Un7kCUaCnQ6RndnRoQwXA6gIUshikb00U9eUNl6YxTslqtKdbpoZx9H4YjQVY nnHS768UFgQqFjYTARV1wrrXTRc9wMVGEKQGs9/O1QRUGYwDsLT0zvtAzVePsDRbkIny RBoDyw3HeECu43QRbptCqc0DzP23+oIv9U4DbVcbod8vKw8kfjAe6Z7E0sSdh/PU26nm pzmV414mb8+0C2vJqQBoJVL1WNzkC45nrnHndpK31Y9khp4N8p0AgQ3CXGYQBYN1DbGj ZMMw== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=Vz8WtJpJcNM2R282wYEgyHbfluHq5aIu4Y0bgBQVYB0=; b=SMSc1DJr/Bqx8UquOvW7XDvbicG//27BRUF2doi/KTKuWWxfl2oCrR+dnm9GhvTtf4 3NEJL9z2eNWxlNffWMj1aWpH9WtLsu8eanBTWRH966xeRwDJ2jwoUxkydQd1Lewgv4sb OtinwbNlRkfFoexZblmwUkJAp1PTUymw0iDNl78ZRlbuURMs6NsI6PgsszTOVfB9RbFV Y3sk1tNocaW5huYSqFtcifFyyXY1I7UHMZhZcmjTkr60R7gBzC0ENyFeircKvBkQw8ER KZUtwZ+VYFq8h8zv9vpzP3AJqvwF0Ws7iAJg2w1a1jJQQ4XGKtVXf7CUX778Jr6L4XAJ HEPQ== X-Gm-Message-State: AFqh2kp39ag7k5/ctv8Zt/SVWDE74Hg92T+brt4Jx1y+vgIUDJkjTqfs /CjGKY8NS+tCDhlxPpJwTUk= X-Google-Smtp-Source: AMrXdXvEz6bN1nYBgS9BQqpQsG8E5P2mIxt21YaOBnceea7QUwDZivIt3IGbGgbugfQmGtuQ6jhSzg== X-Received: by 2002:a05:6a21:3a96:b0:ad:e5e8:cfe8 with SMTP id zv22-20020a056a213a9600b000ade5e8cfe8mr47195967pzb.48.1674865023391; Fri, 27 Jan 2023 16:17:03 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id c10-20020aa78c0a000000b00582bdaab584sm3202026pfd.81.2023.01.27.16.17.02 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:03 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 06/30] sched: Add sched_class->switching_to() and expose check_class_changing/changed() Date: Fri, 27 Jan 2023 14:16:15 -1000 Message-Id: <20230128001639.3510083-7-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org When a task switches to a new sched_class, the prev and new classes are notified through ->switched_from() and ->switched_to(), respectively, after the switching is done. A new BPF extensible sched_class will have callbacks that allow the BPF scheduler to keep track of relevant task states (like priority and cpumask). Those callbacks aren't called while a task is on a different sched_class. 
When a task comes back, we wanna tell the BPF progs the up-to-date state before the task gets enqueued, so we need a hook which is called before the switching is committed. This patch adds ->switching_to() which is called during sched_class switch through check_class_changing() before the task is restored. Also, this patch exposes check_class_changing/changed() in kernel/sched/sched.h. They will be used by the new BPF extensible sched_class to implement implicit sched_class switching which is used e.g. when falling back to CFS when the BPF scheduler fails or unloads. This is a prep patch and doesn't cause any behavior changes. The new operation and exposed functions aren't used yet. v2: Improve patch description w/ details on planned use. Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- kernel/sched/core.c | 20 +++++++++++++++++--- kernel/sched/sched.h | 7 +++++++ 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index ce27ed857975..e6d5374edf58 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2221,6 +2221,17 @@ inline int task_curr(const struct task_struct *p) return cpu_curr(task_cpu(p)) == p; } +/* + * ->switching_to() is called with the pi_lock and rq_lock held and must not + * mess with locking. + */ +void check_class_changing(struct rq *rq, struct task_struct *p, + const struct sched_class *prev_class) +{ + if (prev_class != p->sched_class && p->sched_class->switching_to) + p->sched_class->switching_to(rq, p); +} + /* * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock, * use the balance_callback list if you want balancing. @@ -2228,9 +2239,9 @@ inline int task_curr(const struct task_struct *p) * this means any call to check_class_changed() must be followed by a call to * balance_callback(). */ -static inline void check_class_changed(struct rq *rq, struct task_struct *p, - const struct sched_class *prev_class, - int oldprio) +void check_class_changed(struct rq *rq, struct task_struct *p, + const struct sched_class *prev_class, + int oldprio) { if (prev_class != p->sched_class) { if (prev_class->switched_from) @@ -7072,6 +7083,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) } __setscheduler_prio(p, prio); + check_class_changing(rq, p, prev_class); } check_class_changed(rq, p, prev_class, oldprio); @@ -7681,6 +7693,8 @@ static int __sched_setscheduler(struct task_struct *p, } __setscheduler_uclamp(p, attr); + check_class_changing(rq, p, prev_class); + /* * We enqueue to tail when the priority of a task is * increased (user space view). diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 070603f00470..c083395c5477 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2206,6 +2206,7 @@ struct sched_class { * cannot assume the switched_from/switched_to pair is serialized by * rq->lock. They are however serialized by p->pi_lock. 
*/ + void (*switching_to) (struct rq *this_rq, struct task_struct *task); void (*switched_from)(struct rq *this_rq, struct task_struct *task); void (*switched_to) (struct rq *this_rq, struct task_struct *task); void (*reweight_task)(struct rq *this_rq, struct task_struct *task, @@ -2442,6 +2443,12 @@ static inline void sub_nr_running(struct rq *rq, unsigned count) extern void activate_task(struct rq *rq, struct task_struct *p, int flags); extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); +extern void check_class_changing(struct rq *rq, struct task_struct *p, + const struct sched_class *prev_class); +extern void check_class_changed(struct rq *rq, struct task_struct *p, + const struct sched_class *prev_class, + int oldprio); + extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags); #ifdef CONFIG_PREEMPT_RT From patchwork Sat Jan 28 00:16:16 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119562 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id D4E12C54EAA for ; Sat, 28 Jan 2023 00:17:38 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S232488AbjA1ARh (ORCPT ); Fri, 27 Jan 2023 19:17:37 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:36120 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232489AbjA1ARQ (ORCPT ); Fri, 27 Jan 2023 19:17:16 -0500 Received: from mail-pl1-x62d.google.com (mail-pl1-x62d.google.com [IPv6:2607:f8b0:4864:20::62d]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 1B3CE8715D; Fri, 27 Jan 2023 16:17:06 -0800 (PST) Received: by mail-pl1-x62d.google.com with SMTP id be8so6560590plb.7; Fri, 27 Jan 2023 16:17:06 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=p0ISkd2T3J7+l1Hznoksx6Dz5SLKHYAGnyxSr5SorAg=; b=MK1SussAVC9VsvXsIZvk0OjW/ed1lvSvUxGixEar2Sy0RF/cl4fhxsD+2NdXlImBty VR7iXwvd4TZvPFnKOYjHy57B//qYEC+aLMnzKUmovL9+3smuVjKt/DtE32tpmYdYnexj DchV2Kxh8kk8QrckTp0Pq6hBc3nFu2lC7i6u76IIfWiqCHYNpuGHmaR6IS0/b+cn8mSa +GKAqofMJd2abEuMyzFcPhjtU2F8lFxZCdBvzae+GN7ThvT0OCsc66OSyXvrsQKkIQWq lHSh+ufkLX/NmOxxuhpmwpKwTpQmF+IhKwcahtRGm5iOMF9ofN0CHZe7QRF5w+tTqQQG 7bdw== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=p0ISkd2T3J7+l1Hznoksx6Dz5SLKHYAGnyxSr5SorAg=; b=pTs+0vYLbnyIfmmHbmajmHi8ZlTiXM1oCaygi46tV7snclGzoXKil7FHuIbNRIK600 8y4eugmFIMz77aqS4O3Ntat3DSgXOOamUHMPQmUTZr2dFt3cN5R9QCQiCiwNBwMVDZ67 KIx1yOFjWFOUF5hhPESL0Uibh8UzLp25q6afY25RtiuClols4j3ZdBv31+j8nks/52BM HXFfxW+bOY47KUWlvHgcQweB7CUEOcOG4iau1bQ/iAFRJiZROfAj+MxLh1/L4FgVJUkb qaeBbyG7AfJhle6v6xSHCVQSrMFS9cG7vHNWjdZAboFMADDGTqAMoCLl2geOeR7sWFuz X18g== X-Gm-Message-State: AO0yUKVLOFoG+n/BXEq6dWtLf7jAqO1mIKaxesYZX69jeyIX4e4dr537 3gMxDXhKvctqKa7sWPu3hhE= X-Google-Smtp-Source: AK7set9j6RBMzGQqGuJwGP5rOysHXD9GY3gpPgKfEM3VsY2nKvk1CydU6zeWE2koA2ZJ1OWgnmJf2A== X-Received: by 2002:a17:902:dacf:b0:196:2bf1:b68d with 
SMTP id q15-20020a170902dacf00b001962bf1b68dmr14009108plx.14.1674865025163; Fri, 27 Jan 2023 16:17:05 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id ik26-20020a170902ab1a00b001895f7c8a71sm3408287plb.97.2023.01.27.16.17.04 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:04 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 07/30] sched: Factor out cgroup weight conversion functions Date: Fri, 27 Jan 2023 14:16:16 -1000 Message-Id: <20230128001639.3510083-8-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org Factor out sched_weight_from/to_cgroup() which convert between scheduler shares and cgroup weight. No functional change. The factored out functions will be used by a new BPF extensible sched_class so that the weights can be exposed to the BPF programs in a way which is consistent cgroup weights and easier to interpret. Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- kernel/sched/core.c | 28 +++++++++++++--------------- kernel/sched/sched.h | 18 ++++++++++++++++++ 2 files changed, 31 insertions(+), 15 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e6d5374edf58..70698725f6de 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -11072,29 +11072,27 @@ static int cpu_extra_stat_show(struct seq_file *sf, } #ifdef CONFIG_FAIR_GROUP_SCHED + +static unsigned long tg_weight(struct task_group *tg) +{ + return scale_load_down(tg->shares); +} + static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css, struct cftype *cft) { - struct task_group *tg = css_tg(css); - u64 weight = scale_load_down(tg->shares); - - return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024); + return sched_weight_to_cgroup(tg_weight(css_tg(css))); } static int cpu_weight_write_u64(struct cgroup_subsys_state *css, - struct cftype *cft, u64 weight) + struct cftype *cft, u64 cgrp_weight) { - /* - * cgroup weight knobs should use the common MIN, DFL and MAX - * values which are 1, 100 and 10000 respectively. While it loses - * a bit of range on both ends, it maps pretty well onto the shares - * value used by scheduler and the round-trip conversions preserve - * the original value over the entire range. 
- */ - if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX) + unsigned long weight; + + if (cgrp_weight < CGROUP_WEIGHT_MIN || cgrp_weight > CGROUP_WEIGHT_MAX) return -ERANGE; - weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL); + weight = sched_weight_from_cgroup(cgrp_weight); return sched_group_set_shares(css_tg(css), scale_load(weight)); } @@ -11102,7 +11100,7 @@ static int cpu_weight_write_u64(struct cgroup_subsys_state *css, static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) { - unsigned long weight = scale_load_down(css_tg(css)->shares); + unsigned long weight = tg_weight(css_tg(css)); int last_delta = INT_MAX; int prio, delta; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index c083395c5477..946fdb51b6e6 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -435,6 +435,24 @@ struct task_group { #define MAX_SHARES (1UL << 18) #endif +/* + * cgroup weight knobs should use the common MIN, DFL and MAX values which are + * 1, 100 and 10000 respectively. While it loses a bit of range on both ends, it + * maps pretty well onto the shares value used by scheduler and the round-trip + * conversions preserve the original value over the entire range. + */ +static inline unsigned long sched_weight_from_cgroup(unsigned long cgrp_weight) +{ + return DIV_ROUND_CLOSEST_ULL(cgrp_weight * 1024, CGROUP_WEIGHT_DFL); +} + +static inline unsigned long sched_weight_to_cgroup(unsigned long weight) +{ + return clamp_t(unsigned long, + DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024), + CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX); +} + typedef int (*tg_visitor)(struct task_group *, void *); extern int walk_tg_tree_from(struct task_group *from, From patchwork Sat Jan 28 00:16:17 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119563 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 805A0C38142 for ; Sat, 28 Jan 2023 00:17:40 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S232679AbjA1ARj (ORCPT ); Fri, 27 Jan 2023 19:17:39 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:36234 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S231615AbjA1ARW (ORCPT ); Fri, 27 Jan 2023 19:17:22 -0500 Received: from mail-pf1-x42b.google.com (mail-pf1-x42b.google.com [IPv6:2607:f8b0:4864:20::42b]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id A210A8B7BB; Fri, 27 Jan 2023 16:17:07 -0800 (PST) Received: by mail-pf1-x42b.google.com with SMTP id cr11so2676595pfb.1; Fri, 27 Jan 2023 16:17:07 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=GaSH0lBUFNrOuEwNNJQpHJ3+/E2VBdXbaH8UuhQdXnk=; b=SChcJ3P1SUB48iOxqJmg6K6hG+0FV9tVxBj7k+F5YGOG99MKTpCsmpX49tCTP/ioaY MCxE8hP9mqp98dJyDzHgTG+NNhjNBzJ8xG7FhC3H+07fyDDyUaN39ZFCS/cxBvJOGAnj sAOdTGs7v0WimDoqCnPR9zooYTc3sR4Q6ndNQDpJSKgGTo3bF4c4qjMpfQWgkdA9oJsr /tHrpCxLUq1VPCOnlc+ET4aTM4FRyJtcHgEtURv62/vR0sszLQ4uy5IiFb5EIKlxIYDd j99kJxjNwrlf5KRDDebLvN7/+Xh2CdvxGrPbRz3xBQx96tIC0jQWfyRp8AEj6qJAFqOb xx8g== X-Google-DKIM-Signature: v=1; a=rsa-sha256; 
c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=GaSH0lBUFNrOuEwNNJQpHJ3+/E2VBdXbaH8UuhQdXnk=; b=X1SaHr/TRzi13iYObfWR1HrgEWFuZ3xcNU7dTrp+PR6GyTzB7uqNAmuxTSDVzbwChg nWmjJb7RVwRtqBy7HWU4IuhHYcQx5KJ0Ygqlh8yAXxSZoWADqziTIrwJR7wnXxaLeg6O nIaFuPe6ISlGlJTzN1EfTuXGUtlRFuEGYkgvFyEtRAgbR3RuFAHD/k1M2ZCbjVPnxnzF dPaCou/v+d3KOrCUrpjgWUKVdqDr6E8KqMgCeneCDNe5mHllNZzxIdb3LaZN1ZrfuH9g HeuEiRVOcWqCa8BiBit9N0eo8sjbE1A70rda1tNEipx5YCTQnLi+0uTz2NpTwwGgCBWJ 5tEQ== X-Gm-Message-State: AFqh2kqXa+lMv90acd0OMuAJCSN0VOhVRAyY8kTIj1XAa6v11pm7efKm 931Kp7lr6CgN/mIiUdPhWP8= X-Google-Smtp-Source: AMrXdXurwcWVtNJVJsH2qtPHQTUYmFSpKN9fFu84p1bisJ6AWnekB/kpcCS+Cdfmggdx4iqLUQgXcA== X-Received: by 2002:a62:4e8e:0:b0:580:fb8e:3044 with SMTP id c136-20020a624e8e000000b00580fb8e3044mr43420417pfb.22.1674865026877; Fri, 27 Jan 2023 16:17:06 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id br13-20020a056a00440d00b00581ad007a9fsm3154791pfb.153.2023.01.27.16.17.06 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:06 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 08/30] sched: Expose css_tg(), __setscheduler_prio() and SCHED_CHANGE_BLOCK() Date: Fri, 27 Jan 2023 14:16:17 -1000 Message-Id: <20230128001639.3510083-9-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org These will be used by a new BPF extensible sched_class. css_tg() will be used in the init and exit paths to visit all task_groups by walking cgroups. __setscheduler_prio() is used to pick the sched_class matching the current prio of the task. For the new BPF extensible sched_class, the mapping from the task configuration to sched_class isn't static and depends on a few factors - e.g. whether the BPF progs implementing the scheduler are loaded and in a serviceable state. That mapping logic will be added to __setscheduler_prio(). When the BPF scheduler progs get loaded and unloaded, the mapping changes and the new sched_class will walk the tasks applying the new mapping using SCHED_CHANGE_BLOCK() and __setscheduler_prio(). v2: Expose SCHED_CHANGE_BLOCK() too and update the description. 
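For illustration, the intended usage pattern for the exposed pieces looks roughly like the sketch below. It is not part of this patch: scx_reapply_sched_class() is a hypothetical name, the permission and validation handling done by __sched_setscheduler() is omitted, and the real task walker only arrives with the new sched_class in later patches.

    /*
     * Hypothetical sketch: re-apply the prio-to-class mapping to one task
     * after the mapping has changed (e.g. BPF scheduler loaded or unloaded).
     */
    static void scx_reapply_sched_class(struct task_struct *p)
    {
            const struct sched_class *prev_class;
            struct rq_flags rf;
            struct rq *rq;

            rq = task_rq_lock(p, &rf);
            prev_class = p->sched_class;

            SCHED_CHANGE_BLOCK(rq, p, DEQUEUE_SAVE | DEQUEUE_MOVE) {
                    /* pick dl/rt/fair (or the new class) from the current prio */
                    __setscheduler_prio(p, p->prio);
                    check_class_changing(rq, p, prev_class);
            }

            task_rq_unlock(rq, p, &rf);
    }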
Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- kernel/sched/core.c | 47 +++---------------------------------------- kernel/sched/sched.h | 48 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 44 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 70698725f6de..1fd4e2cde35c 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2094,15 +2094,7 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags) dequeue_task(rq, p, flags); } -struct sched_change_guard { - struct task_struct *p; - struct rq *rq; - bool queued; - bool running; - bool done; -}; - -static struct sched_change_guard +struct sched_change_guard sched_change_guard_init(struct rq *rq, struct task_struct *p, int flags) { struct sched_change_guard cg = { @@ -2127,7 +2119,7 @@ sched_change_guard_init(struct rq *rq, struct task_struct *p, int flags) return cg; } -static void sched_change_guard_fini(struct sched_change_guard *cg, int flags) +void sched_change_guard_fini(struct sched_change_guard *cg, int flags) { if (cg->queued) enqueue_task(cg->rq, cg->p, flags | ENQUEUE_NOCLOCK); @@ -2136,34 +2128,6 @@ static void sched_change_guard_fini(struct sched_change_guard *cg, int flags) cg->done = true; } -/** - * SCHED_CHANGE_BLOCK - Nested block for task attribute updates - * @__rq: Runqueue the target task belongs to - * @__p: Target task - * @__flags: DEQUEUE/ENQUEUE_* flags - * - * A task may need to be dequeued and put_prev_task'd for attribute updates and - * set_next_task'd and re-enqueued afterwards. This helper defines a nested - * block which automatically handles these preparation and cleanup operations. - * - * SCHED_CHANGE_BLOCK(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK) { - * update_attribute(p); - * ... - * } - * - * If @__flags is a variable, the variable may be updated in the block body and - * the updated value will be used when re-enqueueing @p. - * - * If %DEQUEUE_NOCLOCK is specified, the caller is responsible for calling - * update_rq_clock() beforehand. Otherwise, the rq clock is automatically - * updated iff the task needs to be dequeued and re-enqueued. Only the former - * case guarantees that the rq clock is up-to-date inside and after the block. - */ -#define SCHED_CHANGE_BLOCK(__rq, __p, __flags) \ - for (struct sched_change_guard __cg = \ - sched_change_guard_init(__rq, __p, __flags); \ - !__cg.done; sched_change_guard_fini(&__cg, __flags)) - static inline int __normal_prio(int policy, int rt_prio, int nice) { int prio; @@ -6949,7 +6913,7 @@ int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flag } EXPORT_SYMBOL(default_wake_function); -static void __setscheduler_prio(struct task_struct *p, int prio) +void __setscheduler_prio(struct task_struct *p, int prio) { if (dl_prio(prio)) p->sched_class = &dl_sched_class; @@ -10340,11 +10304,6 @@ void sched_move_task(struct task_struct *tsk) task_rq_unlock(rq, tsk, &rf); } -static inline struct task_group *css_tg(struct cgroup_subsys_state *css) -{ - return css ? 
container_of(css, struct task_group, css) : NULL; -} - static struct cgroup_subsys_state * cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) { diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 946fdb51b6e6..1927adc6c4bb 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -469,6 +469,11 @@ static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) return walk_tg_tree_from(&root_task_group, down, up, data); } +static inline struct task_group *css_tg(struct cgroup_subsys_state *css) +{ + return css ? container_of(css, struct task_group, css) : NULL; +} + extern int tg_nop(struct task_group *tg, void *data); extern void free_fair_sched_group(struct task_group *tg); @@ -493,6 +498,8 @@ extern long sched_group_rt_runtime(struct task_group *tg); extern long sched_group_rt_period(struct task_group *tg); extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); +extern void __setscheduler_prio(struct task_struct *p, int prio); + extern struct task_group *sched_create_group(struct task_group *parent); extern void sched_online_group(struct task_group *tg, struct task_group *parent); @@ -2461,6 +2468,47 @@ static inline void sub_nr_running(struct rq *rq, unsigned count) extern void activate_task(struct rq *rq, struct task_struct *p, int flags); extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); +struct sched_change_guard { + struct task_struct *p; + struct rq *rq; + bool queued; + bool running; + bool done; +}; + +extern struct sched_change_guard +sched_change_guard_init(struct rq *rq, struct task_struct *p, int flags); + +extern void sched_change_guard_fini(struct sched_change_guard *cg, int flags); + +/** + * SCHED_CHANGE_BLOCK - Nested block for task attribute updates + * @__rq: Runqueue the target task belongs to + * @__p: Target task + * @__flags: DEQUEUE/ENQUEUE_* flags + * + * A task may need to be dequeued and put_prev_task'd for attribute updates and + * set_next_task'd and re-enqueued afterwards. This helper defines a nested + * block which automatically handles these preparation and cleanup operations. + * + * SCHED_CHANGE_BLOCK(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK) { + * update_attribute(p); + * ... + * } + * + * If @__flags is a variable, the variable may be updated in the block body and + * the updated value will be used when re-enqueueing @p. + * + * If %DEQUEUE_NOCLOCK is specified, the caller is responsible for calling + * update_rq_clock() beforehand. Otherwise, the rq clock is automatically + * updated iff the task needs to be dequeued and re-enqueued. Only the former + * case guarantees that the rq clock is up-to-date inside and after the block. 
+ */ +#define SCHED_CHANGE_BLOCK(__rq, __p, __flags) \ + for (struct sched_change_guard __cg = \ + sched_change_guard_init(__rq, __p, __flags); \ + !__cg.done; sched_change_guard_fini(&__cg, __flags)) + extern void check_class_changing(struct rq *rq, struct task_struct *p, const struct sched_class *prev_class); extern void check_class_changed(struct rq *rq, struct task_struct *p, From patchwork Sat Jan 28 00:16:18 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119564 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 5CCF8C38142 for ; Sat, 28 Jan 2023 00:17:56 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S232774AbjA1ARy (ORCPT ); Fri, 27 Jan 2023 19:17:54 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:36720 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232778AbjA1ARg (ORCPT ); Fri, 27 Jan 2023 19:17:36 -0500 Received: from mail-pj1-x1030.google.com (mail-pj1-x1030.google.com [IPv6:2607:f8b0:4864:20::1030]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 9530F8CE07; Fri, 27 Jan 2023 16:17:09 -0800 (PST) Received: by mail-pj1-x1030.google.com with SMTP id w6-20020a17090ac98600b0022c58cc7a18so1747400pjt.1; Fri, 27 Jan 2023 16:17:09 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=A5onzYI/BoYPJ5RTBoK9F833IAc/xj+dSgz/8Wr9CLE=; b=MsBTe2ED10UEbb4ZWanyUT51HInOMEhoF46KLis73uMOpkqCJB3XbReyi0AA6zU1On 7CWM8N3bBkmEwZ+QOfMl9YUr2Jj0qocgMj66AQGt6GfcgElhhngx9u0fUxrAOEzbFhco b2QzM9dyE6iFnyr+6rHJuuKSQ3a861T6dW+LZ7ipPNGqfQ37OAnDtwzelkpo+68dP4+5 o93YqOj+tAgarsQqh+Mj02/k41PCuKjXeR0/DEDTxV7aQvVrTlz60eF3nGIBEEiBooj3 SOzGB2usd7fEiM5yAZtcw0rAGF1G6ZjQGey9uAa+aBxoqVrAlsA3OI3Ibak0FfqZPRrt FhxA== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=A5onzYI/BoYPJ5RTBoK9F833IAc/xj+dSgz/8Wr9CLE=; b=XwCEvmU5CQWCJRDSXQSHNtfNHVau1KcE0RTw/vC7XODJJIYB77tzSFx9ljyy51ga8Y PFIxzijGSATe9sMitDxMWGFCfClACJS5CzFMzvloEGs9NYTUEU2eDX8yHIRH2hFmBYX5 vu9l5fFC2LroWqwxEFTetjIJZ9KSsEfe10VhJ+kt/Q6H0FXWZnGL7Rj/Kb7l6VLcKQ3T VepV8kzYudo0GlSDc0oFW+NfQ1Vfg9VFodNcExNTeYf1xeW802EV/NrG+A8xNtdQfRwD Eks93o0rNLB6+l8x70Uh73C2xR+3uDNb2GqOfKQZMHt1pQ4yhHTBngyoS+tLZarU9/m8 SjXA== X-Gm-Message-State: AO0yUKVwtHl/pWRUBJAKVxQq+ybEdLMJUBIdkcW/cFvZK27s9Cuc33U1 QIY/3WPJl5pLRO46itd3YwA= X-Google-Smtp-Source: AK7set8NNnptk7HMvEl0rpRIke72trhWugiUNgz0ZnYalNWo7QMB/vOFdGl4UQ1pVokiuaeyA+b1tA== X-Received: by 2002:a17:903:2093:b0:192:ef8e:4258 with SMTP id d19-20020a170903209300b00192ef8e4258mr148224plc.14.1674865028680; Fri, 27 Jan 2023 16:17:08 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. 
[2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id 13-20020a170902c14d00b00194706d3f25sm3423369plj.144.2023.01.27.16.17.08 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:08 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 09/30] sched: Enumerate CPU cgroup file types Date: Fri, 27 Jan 2023 14:16:18 -1000 Message-Id: <20230128001639.3510083-10-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org Rename cpu[_legacy]_files to cpu[_legacy]_cftypes for clarity and add cpu_cftype_id which enumerates every cgroup2 interface file type. This doesn't make any functional difference now. The enums will be used to access specific cftypes by a new BPF extensible sched_class to selectively show and hide CPU controller interface files depending on the capability of the currently loaded BPF scheduler progs. Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- kernel/sched/core.c | 22 +++++++++++----------- kernel/sched/sched.h | 21 +++++++++++++++++++++ 2 files changed, 32 insertions(+), 11 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1fd4e2cde35c..729de63ee6ec 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -10941,7 +10941,7 @@ static int cpu_idle_write_s64(struct cgroup_subsys_state *css, } #endif -static struct cftype cpu_legacy_files[] = { +static struct cftype cpu_legacy_cftypes[] = { #ifdef CONFIG_FAIR_GROUP_SCHED { .name = "shares", @@ -11148,21 +11148,21 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of, } #endif -static struct cftype cpu_files[] = { +struct cftype cpu_cftypes[CPU_CFTYPE_CNT + 1] = { #ifdef CONFIG_FAIR_GROUP_SCHED - { + [CPU_CFTYPE_WEIGHT] = { .name = "weight", .flags = CFTYPE_NOT_ON_ROOT, .read_u64 = cpu_weight_read_u64, .write_u64 = cpu_weight_write_u64, }, - { + [CPU_CFTYPE_WEIGHT_NICE] = { .name = "weight.nice", .flags = CFTYPE_NOT_ON_ROOT, .read_s64 = cpu_weight_nice_read_s64, .write_s64 = cpu_weight_nice_write_s64, }, - { + [CPU_CFTYPE_IDLE] = { .name = "idle", .flags = CFTYPE_NOT_ON_ROOT, .read_s64 = cpu_idle_read_s64, @@ -11170,13 +11170,13 @@ static struct cftype cpu_files[] = { }, #endif #ifdef CONFIG_CFS_BANDWIDTH - { + [CPU_CFTYPE_MAX] = { .name = "max", .flags = CFTYPE_NOT_ON_ROOT, .seq_show = cpu_max_show, .write = cpu_max_write, }, - { + [CPU_CFTYPE_MAX_BURST] = { .name = "max.burst", .flags = CFTYPE_NOT_ON_ROOT, .read_u64 = cpu_cfs_burst_read_u64, @@ -11184,13 +11184,13 @@ static struct cftype cpu_files[] = { }, #endif #ifdef CONFIG_UCLAMP_TASK_GROUP - { + [CPU_CFTYPE_UCLAMP_MIN] = { .name = "uclamp.min", .flags = CFTYPE_NOT_ON_ROOT, .seq_show = cpu_uclamp_min_show, .write = cpu_uclamp_min_write, }, - { + 
[CPU_CFTYPE_UCLAMP_MAX] = { .name = "uclamp.max", .flags = CFTYPE_NOT_ON_ROOT, .seq_show = cpu_uclamp_max_show, @@ -11210,8 +11210,8 @@ struct cgroup_subsys cpu_cgrp_subsys = { .can_attach = cpu_cgroup_can_attach, #endif .attach = cpu_cgroup_attach, - .legacy_cftypes = cpu_legacy_files, - .dfl_cftypes = cpu_files, + .legacy_cftypes = cpu_legacy_cftypes, + .dfl_cftypes = cpu_cftypes, .early_init = true, .threaded = true, }; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 1927adc6c4bb..dd567eb7881a 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3334,4 +3334,25 @@ static inline void update_current_exec_runtime(struct task_struct *curr, cgroup_account_cputime(curr, delta_exec); } +#ifdef CONFIG_CGROUP_SCHED +enum cpu_cftype_id { +#ifdef CONFIG_FAIR_GROUP_SCHED + CPU_CFTYPE_WEIGHT, + CPU_CFTYPE_WEIGHT_NICE, + CPU_CFTYPE_IDLE, +#endif +#ifdef CONFIG_CFS_BANDWIDTH + CPU_CFTYPE_MAX, + CPU_CFTYPE_MAX_BURST, +#endif +#ifdef CONFIG_UCLAMP_TASK_GROUP + CPU_CFTYPE_UCLAMP_MIN, + CPU_CFTYPE_UCLAMP_MAX, +#endif + CPU_CFTYPE_CNT, +}; + +extern struct cftype cpu_cftypes[CPU_CFTYPE_CNT + 1]; +#endif /* CONFIG_CGROUP_SCHED */ + #endif /* _KERNEL_SCHED_SCHED_H */ From patchwork Sat Jan 28 00:16:19 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119565 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 5E6AFC38142 for ; Sat, 28 Jan 2023 00:18:01 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S232821AbjA1AR7 (ORCPT ); Fri, 27 Jan 2023 19:17:59 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:36748 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232845AbjA1ARi (ORCPT ); Fri, 27 Jan 2023 19:17:38 -0500 Received: from mail-pl1-x632.google.com (mail-pl1-x632.google.com [IPv6:2607:f8b0:4864:20::632]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 3750986E8C; Fri, 27 Jan 2023 16:17:11 -0800 (PST) Received: by mail-pl1-x632.google.com with SMTP id k13so6628405plg.0; Fri, 27 Jan 2023 16:17:11 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=Dl4nTEcWT/EGt93/xB46mljQZl+Kfpo6bPKOqUFVnUs=; b=Wua+ST9qq6Cqyei+11te6FyZRDXjo5UT/RIR1Rcxx7zCNHjCwC1mZ65ZSRNi+h/zn0 8aDUYErsynJLoGb26hgy0tMd6+LvXFBe048ECF8/c5DL+JqCYK+eAVvyue83UtzqJ9Tx Mk/4UrBWLDYMQZKcIbvFYmg8w17P008cmiaO85YW6yhXnddFpd8EZEdVpJbJHxxRTSaN NwdhY4/21e97ysr15kYII9kABWwA9H/u7ly1v+Wt6ga4rx/f2s6CMCO0FEka4KHxWs2V PW8X3bfQPLFiigpZe2sCIljxv3PpLlXa1oMklE5i6i1L8vKirIf1yTh0WzFpSmqo+NZ3 l/5Q== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=Dl4nTEcWT/EGt93/xB46mljQZl+Kfpo6bPKOqUFVnUs=; b=dD5asuT881+gv1SU9YkDNODKtNGjQunCrVNjy5H9QaYVjoH+BTHpNdF9SBBTs0FmT9 O8chIW+11+IjfOgma+lEGMuGxDNT1BmtGR/jqfHtOXtQ/Kxp4VlE4BRXuNEFSI35quou a2O/xabudELWv78RZL9QopfvptdMS35RTyEJXmysLF5FgY+O8KVpIwj7+BhhShPqcG+G PqqY7nz5SSjusmkSxHICb0+XiKQg5Fp8Qb8IQg+g8qO+yjpi6AiByB4s239WJo5DuQF5 
/bgmtz8EyiDBBqfUXcB84QmwF8AVxtAPbuo1rq7wPRqxNzeZLlTqrGEYY4m3e4t2KjUO m6mQ== X-Gm-Message-State: AO0yUKXXUnnfFauPOqlPA/6CIWZJ9hiNgQ55WpdJACD6EU4gTbIjBPmE XsI1MFx7nlPyVSwN2CdookM= X-Google-Smtp-Source: AK7set9h615/DPijuRyydm9F5Rv37JrFWXN0i7SXx+orPogX0j5VbRdQi01tLv229SG1SJWqzEDZCg== X-Received: by 2002:a17:902:db04:b0:196:5640:b081 with SMTP id m4-20020a170902db0400b001965640b081mr5283694plx.7.1674865030438; Fri, 27 Jan 2023 16:17:10 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id iw12-20020a170903044c00b00186acb14c4asm3423593plb.67.2023.01.27.16.17.09 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:10 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 10/30] sched: Add @reason to sched_class->rq_{on|off}line() Date: Fri, 27 Jan 2023 14:16:19 -1000 Message-Id: <20230128001639.3510083-11-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org ->rq_{on|off}line are called either during CPU hotplug or cpuset partition updates. A planned BPF extensible sched_class wants to tell the BPF scheduler progs about CPU hotplug events in a way that's synchronized with rq state changes. As the BPF scheduler progs aren't necessarily affected by cpuset partition updates, we need a way to distinguish the two types of events. Let's add an argument to tell them apart. v2: Patch description updated to detail the expected use. 
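For illustration, a ->rq_online() implementation that only cares about genuine hotplug events can now filter on the new argument. The sketch below is not part of the patch, and scx_notify_cpu_online() is a hypothetical helper standing in for however the BPF progs end up being notified.

    /* hypothetical sketch: forward only real hotplug transitions */
    static void rq_online_scx(struct rq *rq, enum rq_onoff_reason reason)
    {
            if (reason != RQ_ONOFF_HOTPLUG)
                    return; /* cpuset/topology rebuild, CPU state didn't change */

            scx_notify_cpu_online(rq->cpu); /* hypothetical helper */
    }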
Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- kernel/sched/core.c | 12 ++++++------ kernel/sched/deadline.c | 4 ++-- kernel/sched/fair.c | 4 ++-- kernel/sched/rt.c | 4 ++-- kernel/sched/sched.h | 13 +++++++++---- kernel/sched/topology.c | 4 ++-- 6 files changed, 23 insertions(+), 18 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 729de63ee6ec..6447f10ecd44 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -9355,7 +9355,7 @@ static inline void balance_hotplug_wait(void) #endif /* CONFIG_HOTPLUG_CPU */ -void set_rq_online(struct rq *rq) +void set_rq_online(struct rq *rq, enum rq_onoff_reason reason) { if (!rq->online) { const struct sched_class *class; @@ -9365,19 +9365,19 @@ void set_rq_online(struct rq *rq) for_each_class(class) { if (class->rq_online) - class->rq_online(rq); + class->rq_online(rq, reason); } } } -void set_rq_offline(struct rq *rq) +void set_rq_offline(struct rq *rq, enum rq_onoff_reason reason) { if (rq->online) { const struct sched_class *class; for_each_class(class) { if (class->rq_offline) - class->rq_offline(rq); + class->rq_offline(rq, reason); } cpumask_clear_cpu(rq->cpu, rq->rd->online); @@ -9473,7 +9473,7 @@ int sched_cpu_activate(unsigned int cpu) rq_lock_irqsave(rq, &rf); if (rq->rd) { BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); - set_rq_online(rq); + set_rq_online(rq, RQ_ONOFF_HOTPLUG); } rq_unlock_irqrestore(rq, &rf); @@ -9518,7 +9518,7 @@ int sched_cpu_deactivate(unsigned int cpu) if (rq->rd) { update_rq_clock(rq); BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); - set_rq_offline(rq); + set_rq_offline(rq, RQ_ONOFF_HOTPLUG); } rq_unlock_irqrestore(rq, &rf); diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 0d97d54276cc..f264814e0513 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2518,7 +2518,7 @@ static void set_cpus_allowed_dl(struct task_struct *p, } /* Assumes rq->lock is held */ -static void rq_online_dl(struct rq *rq) +static void rq_online_dl(struct rq *rq, enum rq_onoff_reason reason) { if (rq->dl.overloaded) dl_set_overload(rq); @@ -2529,7 +2529,7 @@ static void rq_online_dl(struct rq *rq) } /* Assumes rq->lock is held */ -static void rq_offline_dl(struct rq *rq) +static void rq_offline_dl(struct rq *rq, enum rq_onoff_reason reason) { if (rq->dl.overloaded) dl_clear_overload(rq); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index fbedf99ed953..b4db8943ed8b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -11673,14 +11673,14 @@ void trigger_load_balance(struct rq *rq) nohz_balancer_kick(rq); } -static void rq_online_fair(struct rq *rq) +static void rq_online_fair(struct rq *rq, enum rq_onoff_reason reason) { update_sysctl(); update_runtime_enabled(rq); } -static void rq_offline_fair(struct rq *rq) +static void rq_offline_fair(struct rq *rq, enum rq_onoff_reason reason) { update_sysctl(); diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index ed2a47e4ddae..0fb7ee087669 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -2470,7 +2470,7 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p) } /* Assumes rq->lock is held */ -static void rq_online_rt(struct rq *rq) +static void rq_online_rt(struct rq *rq, enum rq_onoff_reason reason) { if (rq->rt.overloaded) rt_set_overload(rq); @@ -2481,7 +2481,7 @@ static void rq_online_rt(struct rq *rq) } /* Assumes rq->lock is held */ -static void rq_offline_rt(struct rq *rq) +static void rq_offline_rt(struct rq *rq, enum 
rq_onoff_reason reason) { if (rq->rt.overloaded) rt_clear_overload(rq); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index dd567eb7881a..cc1163b15aa0 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2180,6 +2180,11 @@ extern const u32 sched_prio_to_wmult[40]; #define RETRY_TASK ((void *)-1UL) +enum rq_onoff_reason { + RQ_ONOFF_HOTPLUG, /* CPU is going on/offline */ + RQ_ONOFF_TOPOLOGY, /* sched domain topology update */ +}; + struct affinity_context { const struct cpumask *new_mask; struct cpumask *user_mask; @@ -2216,8 +2221,8 @@ struct sched_class { void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx); - void (*rq_online)(struct rq *rq); - void (*rq_offline)(struct rq *rq); + void (*rq_online)(struct rq *rq, enum rq_onoff_reason reason); + void (*rq_offline)(struct rq *rq, enum rq_onoff_reason reason); struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq); #endif @@ -2782,8 +2787,8 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2) raw_spin_rq_unlock(rq1); } -extern void set_rq_online (struct rq *rq); -extern void set_rq_offline(struct rq *rq); +extern void set_rq_online (struct rq *rq, enum rq_onoff_reason reason); +extern void set_rq_offline(struct rq *rq, enum rq_onoff_reason reason); extern bool sched_smp_initialized; #else /* CONFIG_SMP */ diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 8739c2a5a54e..0e859bea1cb6 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -493,7 +493,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd) old_rd = rq->rd; if (cpumask_test_cpu(rq->cpu, old_rd->online)) - set_rq_offline(rq); + set_rq_offline(rq, RQ_ONOFF_TOPOLOGY); cpumask_clear_cpu(rq->cpu, old_rd->span); @@ -511,7 +511,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd) cpumask_set_cpu(rq->cpu, rd->span); if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) - set_rq_online(rq); + set_rq_online(rq, RQ_ONOFF_TOPOLOGY); raw_spin_rq_unlock_irqrestore(rq, flags); From patchwork Sat Jan 28 00:16:20 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119566 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 40890C38142 for ; Sat, 28 Jan 2023 00:18:04 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S232859AbjA1ASC (ORCPT ); Fri, 27 Jan 2023 19:18:02 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:36760 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232180AbjA1ARi (ORCPT ); Fri, 27 Jan 2023 19:17:38 -0500 Received: from mail-pl1-x631.google.com (mail-pl1-x631.google.com [IPv6:2607:f8b0:4864:20::631]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 1E9C18CE37; Fri, 27 Jan 2023 16:17:12 -0800 (PST) Received: by mail-pl1-x631.google.com with SMTP id jm10so6535493plb.13; Fri, 27 Jan 2023 16:17:12 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=jROUCebATWHzru8W0ZlCpbTQs5sWj+GIwoxmhemafT4=; b=pQ8MBm2tl47RCvTAA3WauynPpa04W1aKkRrEeDObjEVsK8Ab8jq6/ZXkL4Erqz/fVz 
R354yJ4Hb7mJD3OpFyDwkJLJHSmp3vmQcC1kWd0rMGIJExGewN98PLvpYV6d+UQ2mV4u WDeP49mX6jp1+SUIaUkPwLKPJroTt53RKRQHHOGzNvGNRDvBShMKPdofxAlKmRmr5kjo r31Enjjha/MGbDzlzAUTmB6vfyIS01mNHgeqO2uFkYHtUhsJJId8HAttd9gJb2mPSCVf g3rRfAwTUawOQ2uDD65tf2+jP1MW6GuX/+sAaTnXKpNLZxlYFJPS15XOlXwzvDOsH16M 085g== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=jROUCebATWHzru8W0ZlCpbTQs5sWj+GIwoxmhemafT4=; b=KzQb9EHqXIjva6GBwoVHi5uY71yOBzXJb83m+9MZfhcIdZGxfUqEQ/6AuUigOfPbN+ IG9aU6fbMtwpkpNrlMyCABCXCD76DE3pfrsrksPWBAys5KYrJMsCVHoOXXet2zhvj6KX ZPVhrpZyTtEsLKQk6mpP1tG5JmxW3Xrlpe8d74Rhiq0YclpuGdF7wuh2OP6urr6Vqkf+ CmNwXxG7SV021jbYibU1b3hkfA9ni4c5qLKHGfm4ZRueZ7UKI/qGwYgSlT4BopsOQ0Cf Ng7I72XVUjTFRqAMNRTLg9ZEqzBIQxwRBtf6HuIPx7kraLwuX0IeJ6bOHe3lJewxJVmX Y1qg== X-Gm-Message-State: AFqh2kohWizncCGBZxueO8JbC2LrpWWDEUEuyzOqAiAFUT2wbvq8IiIh 6vnAiqFo8N/DpxvdONxaPms= X-Google-Smtp-Source: AMrXdXt4JvKNriBpmFEfiXMKmDPkQFJs9UWUHpApjn1/vhyKrrs85QEgvE41GDhuRX2zFJA4j6molQ== X-Received: by 2002:a05:6a20:7d98:b0:ad:3ada:c712 with SMTP id v24-20020a056a207d9800b000ad3adac712mr55134239pzj.14.1674865032178; Fri, 27 Jan 2023 16:17:12 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id e13-20020aa798cd000000b0058bc5f6426asm3133204pfm.169.2023.01.27.16.17.11 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:11 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 11/30] sched: Add normal_policy() Date: Fri, 27 Jan 2023 14:16:20 -1000 Message-Id: <20230128001639.3510083-12-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org A new BPF extensible sched_class will need to dynamically change how a task picks its sched_class. For example, if the loaded BPF scheduler progs fail, the tasks will be forced back on CFS even if the task's policy is set to the new sched_class. To support such mapping, add normal_policy() which wraps testing for %SCHED_NORMAL. This doesn't cause any behavior changes. v2: Update the description with more details on the expected use. 
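To make the point of the wrapper concrete, here is a purely illustrative sketch of how normal_policy() could later be specialized. SCHED_EXT and the exact fallback condition are placeholders; the actual mapping logic is added by later patches and may differ.

    /* illustrative only: a later specialization point for the policy mapping */
    static inline int normal_policy(int policy)
    {
    #ifdef CONFIG_SCHED_CLASS_EXT
            /* treat tasks of the new policy as SCHED_NORMAL while the BPF scheduler is unusable */
            if (policy == SCHED_EXT && !scx_enabled())
                    return 1;
    #endif
            return policy == SCHED_NORMAL;
    }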
Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- kernel/sched/fair.c | 2 +- kernel/sched/sched.h | 8 +++++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index b4db8943ed8b..6055903cc3de 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7617,7 +7617,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ * Batch and idle tasks do not preempt non-idle tasks (their preemption * is driven by the tick): */ - if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION)) + if (unlikely(!normal_policy(p->policy)) || !sched_feat(WAKEUP_PREEMPTION)) return; find_matching_se(&se, &pse); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index cc1163b15aa0..91b6fed6aa93 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -182,9 +182,15 @@ static inline int idle_policy(int policy) { return policy == SCHED_IDLE; } + +static inline int normal_policy(int policy) +{ + return policy == SCHED_NORMAL; +} + static inline int fair_policy(int policy) { - return policy == SCHED_NORMAL || policy == SCHED_BATCH; + return normal_policy(policy) || policy == SCHED_BATCH; } static inline int rt_policy(int policy) From patchwork Sat Jan 28 00:16:21 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119567 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 139FDC54EAA for ; Sat, 28 Jan 2023 00:18:17 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S232696AbjA1ASP (ORCPT ); Fri, 27 Jan 2023 19:18:15 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:36802 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232925AbjA1ARj (ORCPT ); Fri, 27 Jan 2023 19:17:39 -0500 Received: from mail-pj1-x1029.google.com (mail-pj1-x1029.google.com [IPv6:2607:f8b0:4864:20::1029]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 9D1468CE10; Fri, 27 Jan 2023 16:17:14 -0800 (PST) Received: by mail-pj1-x1029.google.com with SMTP id x2-20020a17090a46c200b002295ca9855aso10221895pjg.2; Fri, 27 Jan 2023 16:17:14 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=x4vCYz2beF9ROB0r0Gl728SjZFvQ7n/iXYiikxbP3n0=; b=KMhhq72veYUOFqhd6X9qf2fdXBypP80XYIye0ym74+A1efyLRprDjbFCGLRTe3a7cD xVC2XedgeNnW+6aSCq2G/WYSQoBvy1R3rmZXeqlH5xhFvgopOwkz1p2sk3HuTyeidPHk Dc9Z+h02bNBmCTsIDRpx2zSDuIxZyd45dIM5eZSk2jHQqGqpMaJBS7SY4bAKW1aogMkj 3x9qXHfMO0uD48aA5JjjHjZZ8Ytzyfh+J8bAn/XD1HgUsEAYOQ+NUueJl2Mb1yUmmtgG 08ZGj1x8FBVaXEME1WCsOXDelo6qzusu0n4JVjvMmgjoQeiP5NpnNfT9TrIa3Y7QRjj8 iJ6A== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=x4vCYz2beF9ROB0r0Gl728SjZFvQ7n/iXYiikxbP3n0=; b=JjYphUW9xY04v6VqdU4lZS2OIyg5FORJoBhpHSwycbvMGgvZVtMtXbfpk1dGp7EOAR LvXnOVzkaKCtUhgqF+W5gKVl3zXsCoj/uo77ugqMR1oUQgxii1/SSJkKB+wPNSbWEqCj 
XPiCJboRWn3M6qUAweJADkqN9D7GyPxXkuQs3+Ex4J/aizNZCTKg1XMqlgup4gJ7MvXl qkyPGCVntLLJmFvVf1lhJob+yyXIoRP1F3Kj3r9eS4ccTTDHgLosvN4OllRakMcSgOGv 3F9Lg5L7SfAd6jdmlo0K2lT2wSMHEs28BYztqdyhhkNFbjdWJSFnDmA8zP/EsnGjR/jU 4vRw== X-Gm-Message-State: AO0yUKX24ZmlfKoEk8PqUQjDLGV0GShY8mONDQmfaQzkjeqTKrtUXbwa bxoeb0+wAqgXuhm0fUeVe2c= X-Google-Smtp-Source: AK7set+1KkQ49qWAPCdlYzUfto+yptNFQcWCZcuL3237VUu+irbxCAkxlpR2j4y7/KqSGarYPg1rMA== X-Received: by 2002:a17:902:c406:b0:196:e8e:cd28 with SMTP id k6-20020a170902c40600b001960e8ecd28mr21121116plk.15.1674865033944; Fri, 27 Jan 2023 16:17:13 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id r20-20020a170902be1400b00192a8b35fa3sm3413297pls.122.2023.01.27.16.17.13 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:13 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 12/30] sched_ext: Add boilerplate for extensible scheduler class Date: Fri, 27 Jan 2023 14:16:21 -1000 Message-Id: <20230128001639.3510083-13-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org This adds dummy implementations of sched_ext interfaces which interact with the scheduler core and hook them in the correct places. As they're all dummies, this doesn't cause any behavior changes. This is split out to help reviewing. v2: balance_scx_on_up() dropped. This will be handled in sched_ext proper. 
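For orientation, the fork-path wiring added by this patch (all of it no-ops for now) can be summarized as follows; this is distilled from the diff below, not new behavior.

    /*
     * Fork-path hook ordering introduced by this patch:
     *
     *   sched_fork()         calls scx_pre_fork(p); on the early dl_prio()
     *                        failure it undoes it via scx_cancel_fork(p)
     *   sched_cgroup_fork()  returns scx_fork(p), which may fail the fork
     *   sched_post_fork()    calls scx_post_fork(p) once the fork has committed
     *   sched_cancel_fork()  calls scx_cancel_fork(p) when the fork is aborted
     *   __put_task_struct()  calls sched_ext_free(p) when the task is freed
     */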
Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- include/linux/sched/ext.h | 12 ++++++++++++ kernel/fork.c | 2 ++ kernel/sched/core.c | 32 ++++++++++++++++++++++++-------- kernel/sched/ext.h | 24 ++++++++++++++++++++++++ kernel/sched/idle.c | 2 ++ kernel/sched/sched.h | 2 ++ 6 files changed, 66 insertions(+), 8 deletions(-) create mode 100644 include/linux/sched/ext.h create mode 100644 kernel/sched/ext.h diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h new file mode 100644 index 000000000000..a05dfcf533b0 --- /dev/null +++ b/include/linux/sched/ext.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_EXT_H +#define _LINUX_SCHED_EXT_H + +#ifdef CONFIG_SCHED_CLASS_EXT +#error "NOT IMPLEMENTED YET" +#else /* !CONFIG_SCHED_CLASS_EXT */ + +static inline void sched_ext_free(struct task_struct *p) {} + +#endif /* CONFIG_SCHED_CLASS_EXT */ +#endif /* _LINUX_SCHED_EXT_H */ diff --git a/kernel/fork.c b/kernel/fork.c index 184c622b5513..f5aad7fa6350 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -846,6 +847,7 @@ void __put_task_struct(struct task_struct *tsk) WARN_ON(refcount_read(&tsk->usage)); WARN_ON(tsk == current); + sched_ext_free(tsk); io_uring_free(tsk); cgroup_free(tsk); task_numa_free(tsk, true); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 6447f10ecd44..6058042dcc3d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4623,6 +4623,8 @@ late_initcall(sched_core_sysctl_init); */ int sched_fork(unsigned long clone_flags, struct task_struct *p) { + int ret; + __sched_fork(clone_flags, p); /* * We mark the process as NEW here. This guarantees that @@ -4659,12 +4661,16 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) p->sched_reset_on_fork = 0; } - if (dl_prio(p->prio)) - return -EAGAIN; - else if (rt_prio(p->prio)) + scx_pre_fork(p); + + if (dl_prio(p->prio)) { + ret = -EAGAIN; + goto out_cancel; + } else if (rt_prio(p->prio)) { p->sched_class = &rt_sched_class; - else + } else { p->sched_class = &fair_sched_class; + } init_entity_runnable_average(&p->se); @@ -4682,6 +4688,10 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) RB_CLEAR_NODE(&p->pushable_dl_tasks); #endif return 0; + +out_cancel: + scx_cancel_fork(p); + return ret; } int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) @@ -4712,16 +4722,18 @@ int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) p->sched_class->task_fork(p); raw_spin_unlock_irqrestore(&p->pi_lock, flags); - return 0; + return scx_fork(p); } void sched_cancel_fork(struct task_struct *p) { + scx_cancel_fork(p); } void sched_post_fork(struct task_struct *p) { uclamp_post_fork(p); + scx_post_fork(p); } unsigned long to_ratio(u64 period, u64 runtime) @@ -5868,7 +5880,7 @@ static void put_prev_task_balance(struct rq *rq, struct task_struct *prev, * We can terminate the balance pass as soon as we know there is * a runnable task of @class priority or higher. 
*/ - for_class_range(class, prev->sched_class, &idle_sched_class) { + for_balance_class_range(class, prev->sched_class, &idle_sched_class) { if (class->balance(rq, prev, rf)) break; } @@ -5886,6 +5898,9 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) const struct sched_class *class; struct task_struct *p; + if (scx_enabled()) + goto restart; + /* * Optimization: we know that if all tasks are in the fair class we can * call that function directly, but only if the @prev task wasn't of a @@ -5911,7 +5926,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) restart: put_prev_task_balance(rq, prev, rf); - for_each_class(class) { + for_each_active_class(class) { p = class->pick_next_task(rq); if (p) return p; @@ -5944,7 +5959,7 @@ static inline struct task_struct *pick_task(struct rq *rq) const struct sched_class *class; struct task_struct *p; - for_each_class(class) { + for_each_active_class(class) { p = class->pick_task(rq); if (p) return p; @@ -9880,6 +9895,7 @@ void __init sched_init(void) balance_push_set(smp_processor_id(), false); #endif init_sched_fair_class(); + init_sched_ext_class(); psi_init(); diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h new file mode 100644 index 000000000000..6a93c4825339 --- /dev/null +++ b/kernel/sched/ext.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifdef CONFIG_SCHED_CLASS_EXT +#error "NOT IMPLEMENTED YET" +#else /* CONFIG_SCHED_CLASS_EXT */ + +#define scx_enabled() false + +static inline void scx_pre_fork(struct task_struct *p) {} +static inline int scx_fork(struct task_struct *p) { return 0; } +static inline void scx_post_fork(struct task_struct *p) {} +static inline void scx_cancel_fork(struct task_struct *p) {} +static inline void init_sched_ext_class(void) {} + +#define for_each_active_class for_each_class +#define for_balance_class_range for_class_range + +#endif /* CONFIG_SCHED_CLASS_EXT */ + +#if defined(CONFIG_SCHED_CLASS_EXT) && defined(CONFIG_SMP) +#error "NOT IMPLEMENTED YET" +#else +static inline void scx_update_idle(struct rq *rq, bool idle) {} +#endif diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index f26ab2675f7d..86bc5832bdc4 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -428,11 +428,13 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) { + scx_update_idle(rq, false); } static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first) { update_idle_core(rq); + scx_update_idle(rq, true); schedstat_inc(rq->sched_goidle); } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 91b6fed6aa93..d57d17d8ea6b 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3366,4 +3366,6 @@ enum cpu_cftype_id { extern struct cftype cpu_cftypes[CPU_CFTYPE_CNT + 1]; #endif /* CONFIG_CGROUP_SCHED */ +#include "ext.h" + #endif /* _KERNEL_SCHED_SCHED_H */ From patchwork Sat Jan 28 00:16:23 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119569 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 496ADC61DA7 for ; Sat, 28 Jan 2023 00:18:34 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S231616AbjA1ASb 
(ORCPT ); Fri, 27 Jan 2023 19:18:31 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:36750 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232793AbjA1AR7 (ORCPT ); Fri, 27 Jan 2023 19:17:59 -0500 Received: from mail-pg1-x52b.google.com (mail-pg1-x52b.google.com [IPv6:2607:f8b0:4864:20::52b]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 355FC8D087; Fri, 27 Jan 2023 16:17:20 -0800 (PST) Received: by mail-pg1-x52b.google.com with SMTP id 36so4232960pgp.10; Fri, 27 Jan 2023 16:17:20 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=Rmc1QkfbcYCP0+1/1Bfxt/7PlyeNBnS/QuLPrVV9hic=; b=bdG8VGv+409qnqOmS1aQHWHih8JWgNqQbDmJl2N2SVl+9QJtrLc2QGYJw+DOp/pRIZ HgVfL/9dluCmu0oPP0BOPTfhHzIONFHNl1SI1HIgks06yp3otjyyKc8bLtvXn8VPrn62 Ry+eKrtqbWfR1TMmfHhUd5u0gXbXzTRq+0VmNzcSCuJa33N0EtWf7PAuFKCW5+SJiv23 XGTNZimnHAjrhk14TN0exZ85OuVlHKLvVVuM2wMMIcfPSDJKpsfKZuQdrZa6vMECKk61 vId2McbecBJN7HKMg1TB9kujyApk8Bp+PlRNNAOF8+T3pDI1VQzAazkp8hC14HP32e0W bU2g== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=Rmc1QkfbcYCP0+1/1Bfxt/7PlyeNBnS/QuLPrVV9hic=; b=egY+8X8iap3jDMZDZ9bZqBY7IIqmr0Q7yzhhxA7EOD7MGlNUfhnstiId93q2jts51/ Ns+BWvMHWxb6OM5p/MVxgUZZl4ST1J/f2CZ3hWm3UCZJsCJtzrS0IpLEOIyoINotB9Tt kkZNiSCS/uB0ZAARqX/9iaRS6D++GsUWIa/PvrYlrobyEMIJY24T/clLV2jUFoZAVvhF QbIFCSyF1AU2iFMLGjMgSt4rdb5cmKmp0QDEapy7O0Y5mdL7eaIWg0c0pccKisQWoxMU 4vND0YK0oNfaLsBuLpM/CkxWrKpggm8YTnQxhN55TgpLETFZmbBfQRgML/5hjtE2T2Ro +XVg== X-Gm-Message-State: AO0yUKVjaMJyQJy3yHvLOKSS3rB9olwn1dj6pojAXVl7+7OElRuAzSze FUu5F55v8JpvFPObBr5+lJc= X-Google-Smtp-Source: AK7set+nkG1XzbGdK9dhdNlR7ii/0SAfHenLlRzvzK9Nxqnd5tQrYsu79ISOo+oNxOG3wodaDX9Yfg== X-Received: by 2002:a05:6a00:420f:b0:590:7623:9c6f with SMTP id cd15-20020a056a00420f00b0059076239c6fmr231418pfb.34.1674865037937; Fri, 27 Jan 2023 16:17:17 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. 
[2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id o23-20020aa79797000000b00580978caca7sm3202480pfp.45.2023.01.27.16.17.17 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:17 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 14/30] sched_ext: Add scx_example_dummy and scx_example_qmap example schedulers Date: Fri, 27 Jan 2023 14:16:23 -1000 Message-Id: <20230128001639.3510083-15-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org Add two simple example BPF schedulers - dummy and qmap. * dummy: In terms of scheduling, it behaves identical to not having any operation implemented at all. The two operations it implements are only to improve visibility and exit handling. On certain homogeneous configurations, this actually can perform pretty well. * qmap: A fixed five level priority scheduler to demonstrate queueing PIDs on BPF maps for scheduling. While not very practical, this is useful as a simple example and will be used to demonstrate different features. v2: Updated with the generic BPF cpumask helpers. Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- tools/sched_ext/.gitignore | 5 + tools/sched_ext/Makefile | 188 +++++++++++++++++++ tools/sched_ext/gnu/stubs.h | 1 + tools/sched_ext/scx_common.bpf.h | 131 +++++++++++++ tools/sched_ext/scx_example_dummy.bpf.c | 56 ++++++ tools/sched_ext/scx_example_dummy.c | 93 +++++++++ tools/sched_ext/scx_example_qmap.bpf.c | 238 ++++++++++++++++++++++++ tools/sched_ext/scx_example_qmap.c | 84 +++++++++ tools/sched_ext/user_exit_info.h | 50 +++++ 9 files changed, 846 insertions(+) create mode 100644 tools/sched_ext/.gitignore create mode 100644 tools/sched_ext/Makefile create mode 100644 tools/sched_ext/gnu/stubs.h create mode 100644 tools/sched_ext/scx_common.bpf.h create mode 100644 tools/sched_ext/scx_example_dummy.bpf.c create mode 100644 tools/sched_ext/scx_example_dummy.c create mode 100644 tools/sched_ext/scx_example_qmap.bpf.c create mode 100644 tools/sched_ext/scx_example_qmap.c create mode 100644 tools/sched_ext/user_exit_info.h diff --git a/tools/sched_ext/.gitignore b/tools/sched_ext/.gitignore new file mode 100644 index 000000000000..6734f7fd9324 --- /dev/null +++ b/tools/sched_ext/.gitignore @@ -0,0 +1,5 @@ +scx_example_dummy +scx_example_qmap +*.skel.h +*.subskel.h +/tools/ diff --git a/tools/sched_ext/Makefile b/tools/sched_ext/Makefile new file mode 100644 index 000000000000..926b0a36c221 --- /dev/null +++ b/tools/sched_ext/Makefile @@ -0,0 +1,188 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
+include ../build/Build.include +include ../scripts/Makefile.arch +include ../scripts/Makefile.include + +ifneq ($(LLVM),) +ifneq ($(filter %/,$(LLVM)),) +LLVM_PREFIX := $(LLVM) +else ifneq ($(filter -%,$(LLVM)),) +LLVM_SUFFIX := $(LLVM) +endif + +CLANG_TARGET_FLAGS_arm := arm-linux-gnueabi +CLANG_TARGET_FLAGS_arm64 := aarch64-linux-gnu +CLANG_TARGET_FLAGS_hexagon := hexagon-linux-musl +CLANG_TARGET_FLAGS_m68k := m68k-linux-gnu +CLANG_TARGET_FLAGS_mips := mipsel-linux-gnu +CLANG_TARGET_FLAGS_powerpc := powerpc64le-linux-gnu +CLANG_TARGET_FLAGS_riscv := riscv64-linux-gnu +CLANG_TARGET_FLAGS_s390 := s390x-linux-gnu +CLANG_TARGET_FLAGS_x86 := x86_64-linux-gnu +CLANG_TARGET_FLAGS := $(CLANG_TARGET_FLAGS_$(ARCH)) + +ifeq ($(CROSS_COMPILE),) +ifeq ($(CLANG_TARGET_FLAGS),) +$(error Specify CROSS_COMPILE or add '--target=' option to lib.mk +else +CLANG_FLAGS += --target=$(CLANG_TARGET_FLAGS) +endif # CLANG_TARGET_FLAGS +else +CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%)) +endif # CROSS_COMPILE + +CC := $(LLVM_PREFIX)clang$(LLVM_SUFFIX) $(CLANG_FLAGS) -fintegrated-as +else +CC := $(CROSS_COMPILE)gcc +endif # LLVM + +CURDIR := $(abspath .) +TOOLSDIR := $(abspath ..) +LIBDIR := $(TOOLSDIR)/lib +BPFDIR := $(LIBDIR)/bpf +TOOLSINCDIR := $(TOOLSDIR)/include +BPFTOOLDIR := $(TOOLSDIR)/bpf/bpftool +APIDIR := $(TOOLSINCDIR)/uapi +GENDIR := $(abspath ../../include/generated) +GENHDR := $(GENDIR)/autoconf.h + +SCRATCH_DIR := $(CURDIR)/tools +BUILD_DIR := $(SCRATCH_DIR)/build +INCLUDE_DIR := $(SCRATCH_DIR)/include +BPFOBJ_DIR := $(BUILD_DIR)/libbpf +BPFOBJ := $(BPFOBJ_DIR)/libbpf.a +ifneq ($(CROSS_COMPILE),) +HOST_BUILD_DIR := $(BUILD_DIR)/host +HOST_SCRATCH_DIR := host-tools +HOST_INCLUDE_DIR := $(HOST_SCRATCH_DIR)/include +else +HOST_BUILD_DIR := $(BUILD_DIR) +HOST_SCRATCH_DIR := $(SCRATCH_DIR) +HOST_INCLUDE_DIR := $(INCLUDE_DIR) +endif +HOST_BPFOBJ := $(HOST_BUILD_DIR)/libbpf/libbpf.a +RESOLVE_BTFIDS := $(HOST_BUILD_DIR)/resolve_btfids/resolve_btfids +DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool + +VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \ + $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \ + ../../vmlinux \ + /sys/kernel/btf/vmlinux \ + /boot/vmlinux-$(shell uname -r) +VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS)))) +ifeq ($(VMLINUX_BTF),) +$(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)") +endif + +BPFTOOL ?= $(DEFAULT_BPFTOOL) + +ifneq ($(wildcard $(GENHDR)),) + GENFLAGS := -DHAVE_GENHDR +endif + +CFLAGS += -g -O2 -rdynamic -pthread -Wall -Werror $(GENFLAGS) \ + -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \ + -I$(TOOLSINCDIR) -I$(APIDIR) + +# Silence some warnings when compiled with clang +ifneq ($(LLVM),) +CFLAGS += -Wno-unused-command-line-argument +endif + +LDFLAGS = -lelf -lz -lpthread + +IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - &1 \ + | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \ +$(shell $(1) -dM -E - $@ +else + $(call msg,CP,,$@) + $(Q)cp "$(VMLINUX_H)" $@ +endif + +%.bpf.o: %.bpf.c $(INCLUDE_DIR)/vmlinux.h scx_common.bpf.h user_exit_info.h \ + | $(BPFOBJ) + $(call msg,CLNG-BPF,,$@) + $(Q)$(CLANG) $(BPF_CFLAGS) -target bpf -c $< -o $@ + +%.skel.h: %.bpf.o $(BPFTOOL) + $(call msg,GEN-SKEL,,$@) + $(Q)$(BPFTOOL) gen object $(<:.o=.linked1.o) $< + $(Q)$(BPFTOOL) gen object $(<:.o=.linked2.o) $(<:.o=.linked1.o) + $(Q)$(BPFTOOL) gen object $(<:.o=.linked3.o) $(<:.o=.linked2.o) + $(Q)diff $(<:.o=.linked2.o) $(<:.o=.linked3.o) + $(Q)$(BPFTOOL) gen skeleton $(<:.o=.linked3.o) name 
$(<:.bpf.o=) > $@ + $(Q)$(BPFTOOL) gen subskeleton $(<:.o=.linked3.o) name $(<:.bpf.o=) > $(@:.skel.h=.subskel.h) + +scx_example_dummy: scx_example_dummy.c scx_example_dummy.skel.h user_exit_info.h + $(CC) $(CFLAGS) -c $< -o $@.o + $(CC) -o $@ $@.o $(HOST_BPFOBJ) $(LDFLAGS) + +scx_example_qmap: scx_example_qmap.c scx_example_qmap.skel.h user_exit_info.h + $(CC) $(CFLAGS) -c $< -o $@.o + $(CC) -o $@ $@.o $(HOST_BPFOBJ) $(LDFLAGS) + +clean: + rm -rf $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) + rm -f *.o *.bpf.o *.skel.h *.subskel.h + rm -f scx_example_dummy scx_example_qmap + +.PHONY: all clean + +# delete failed targets +.DELETE_ON_ERROR: + +# keep intermediate (.skel.h, .bpf.o, etc) targets +.SECONDARY: diff --git a/tools/sched_ext/gnu/stubs.h b/tools/sched_ext/gnu/stubs.h new file mode 100644 index 000000000000..719225b16626 --- /dev/null +++ b/tools/sched_ext/gnu/stubs.h @@ -0,0 +1 @@ +/* dummy .h to trick /usr/include/features.h to work with 'clang -target bpf' */ diff --git a/tools/sched_ext/scx_common.bpf.h b/tools/sched_ext/scx_common.bpf.h new file mode 100644 index 000000000000..b40a4fc6a159 --- /dev/null +++ b/tools/sched_ext/scx_common.bpf.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#ifndef __SCHED_EXT_COMMON_BPF_H +#define __SCHED_EXT_COMMON_BPF_H + +#include "vmlinux.h" +#include +#include +#include +#include "user_exit_info.h" + +/* + * Earlier versions of clang/pahole lost upper 32bits in 64bit enums which can + * lead to really confusing misbehaviors. Let's trigger a build failure. + */ +static inline void ___vmlinux_h_sanity_check___(void) +{ + _Static_assert(SCX_DSQ_FLAG_BUILTIN, + "bpftool generated vmlinux.h is missing high bits for 64bit enums, upgrade clang and pahole"); +} + +void scx_bpf_error_bstr(char *fmt, unsigned long long *data, u32 data_len) __ksym; + +static inline __attribute__((format(printf, 1, 2))) +void ___scx_bpf_error_format_checker(const char *fmt, ...) {} + +/* + * scx_bpf_error() wraps the scx_bpf_error_bstr() kfunc with variadic arguments + * instead of an array of u64. Note that __param[] must have at least one + * element to keep the verifier happy. + */ +#define scx_bpf_error(fmt, args...) 
\ +({ \ + static char ___fmt[] = fmt; \ + unsigned long long ___param[___bpf_narg(args) ?: 1] = {}; \ + \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + ___bpf_fill(___param, args); \ + _Pragma("GCC diagnostic pop") \ + \ + scx_bpf_error_bstr(___fmt, ___param, sizeof(___param)); \ + \ + ___scx_bpf_error_format_checker(fmt, ##args); \ +}) + +/* BPF core task / cgroup kfunc helpers */ +struct task_struct *bpf_task_from_pid(s32 pid) __ksym; +struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym; +void bpf_task_release(struct task_struct *p) __ksym; + +/* BPF core cpumask kfuncs */ +struct bpf_cpumask *bpf_cpumask_create(void) __ksym; +struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym; +struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **map_value) __ksym; +void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym; +bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym; +void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; +void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; +void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym; +bool bpf_cpumask_and(struct bpf_cpumask *dst, const struct cpumask *src1, + const struct cpumask *src2) __ksym; +u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym; + +s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym; +bool scx_bpf_consume(u64 dsq_id) __ksym; +u32 scx_bpf_dispatch_nr_slots(void) __ksym; +void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym; +s32 scx_bpf_dsq_nr_queued(u64 dsq_id) __ksym; +bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) __ksym; +s32 scx_bpf_pick_idle_cpu(const cpumask_t *cpus_allowed) __ksym; +const struct cpumask *scx_bpf_get_idle_percpu_cpumask(void) __ksym; +const struct cpumask *scx_bpf_get_idle_smt_cpumask(void) __ksym; +void scx_bpf_put_idle_cpumask(const struct cpumask *cpumask) __ksym; +void scx_bpf_destroy_dsq(u64 dsq_id) __ksym; +bool scx_bpf_task_running(const struct task_struct *p) __ksym; +s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym; + +#define PF_KTHREAD 0x00200000 /* I am a kernel thread */ +#define PF_EXITING 0x00000004 +#define CLOCK_MONOTONIC 1 + +#define BPF_STRUCT_OPS(name, args...) \ +SEC("struct_ops/"#name) \ +BPF_PROG(name, ##args) + +#define BPF_STRUCT_OPS_SLEEPABLE(name, args...) \ +SEC("struct_ops.s/"#name) \ +BPF_PROG(name, ##args) + +/** + * MEMBER_VPTR - Obtain the verified pointer to a struct or array member + * @base: struct or array to index + * @member: dereferenced member (e.g. ->field, [idx0][idx1], ...) + * + * The verifier often gets confused by the instruction sequence the compiler + * generates for indexing struct fields or arrays. This macro forces the + * compiler to generate a code sequence which first calculates the byte offset, + * checks it against the struct or array size and add that byte offset to + * generate the pointer to the member to help the verifier. + * + * Ideally, we want to abort if the calculated offset is out-of-bounds. However, + * BPF currently doesn't support abort, so evaluate to NULL instead. The caller + * must check for NULL and take appropriate action to appease the verifier. To + * avoid confusing the verifier, it's best to check for NULL and dereference + * immediately. 
+ * + * vptr = MEMBER_VPTR(my_array, [i][j]); + * if (!vptr) + * return error; + * *vptr = new_value; + */ +#define MEMBER_VPTR(base, member) (typeof(base member) *)({ \ + u64 __base = (u64)base; \ + u64 __addr = (u64)&(base member) - __base; \ + asm volatile ( \ + "if %0 <= %[max] goto +2\n" \ + "%0 = 0\n" \ + "goto +1\n" \ + "%0 += %1\n" \ + : "+r"(__addr) \ + : "r"(__base), \ + [max]"i"(sizeof(base) - sizeof(base member))); \ + __addr; \ +}) + +#endif /* __SCHED_EXT_COMMON_BPF_H */ diff --git a/tools/sched_ext/scx_example_dummy.bpf.c b/tools/sched_ext/scx_example_dummy.bpf.c new file mode 100644 index 000000000000..ac7b490b5a39 --- /dev/null +++ b/tools/sched_ext/scx_example_dummy.bpf.c @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * A minimal dummy scheduler. + * + * In terms of scheduling, this behaves the same as not specifying any ops at + * all - a global FIFO. The only things it adds are the following niceties: + * + * - Statistics tracking how many are queued to local and global dsq's. + * - Termination notification for userspace. + * + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#include "scx_common.bpf.h" + +char _license[] SEC("license") = "GPL"; + +struct user_exit_info uei; + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(u64)); + __uint(max_entries, 2); /* [local, global] */ +} stats SEC(".maps"); + +static void stat_inc(u32 idx) +{ + u64 *cnt_p = bpf_map_lookup_elem(&stats, &idx); + if (cnt_p) + (*cnt_p)++; +} + +void BPF_STRUCT_OPS(dummy_enqueue, struct task_struct *p, u64 enq_flags) +{ + if (enq_flags & SCX_ENQ_LOCAL) { + stat_inc(0); + scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, enq_flags); + } else { + stat_inc(1); + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags); + } +} + +void BPF_STRUCT_OPS(dummy_exit, struct scx_exit_info *ei) +{ + uei_record(&uei, ei); +} + +SEC(".struct_ops") +struct sched_ext_ops dummy_ops = { + .enqueue = (void *)dummy_enqueue, + .exit = (void *)dummy_exit, + .name = "dummy", +}; diff --git a/tools/sched_ext/scx_example_dummy.c b/tools/sched_ext/scx_example_dummy.c new file mode 100644 index 000000000000..72881c881830 --- /dev/null +++ b/tools/sched_ext/scx_example_dummy.c @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
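To underline the claim in the header comment above that dummy "behaves the same as not specifying any ops at all", here is a sketch of the smallest conceivable ops struct - nothing but a name - which, per that comment, would schedule the same way (a global FIFO), just without the statistics and exit reporting that dummy layers on top. Illustrative only, not part of the patch:

	SEC(".struct_ops")
	struct sched_ext_ops trivial_ops = {
		/* no callbacks: enqueue falls back to the built-in global DSQ */
		.name = "trivial",
	};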
+ * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include "user_exit_info.h" +#include "scx_example_dummy.skel.h" + +const char help_fmt[] = +"A minimal dummy sched_ext scheduler.\n" +"\n" +"See the top-level comment in .bpf.c for more details.\n" +"\n" +"Usage: %s\n" +"\n" +" -h Display this help and exit\n"; + +static volatile int exit_req; + +static void sigint_handler(int dummy) +{ + exit_req = 1; +} + +static void read_stats(struct scx_example_dummy *skel, u64 *stats) +{ + int nr_cpus = libbpf_num_possible_cpus(); + u64 cnts[2][nr_cpus]; + u32 idx; + + memset(stats, 0, sizeof(stats[0]) * 2); + + for (idx = 0; idx < 2; idx++) { + int ret, cpu; + + ret = bpf_map_lookup_elem(bpf_map__fd(skel->maps.stats), + &idx, cnts[idx]); + if (ret < 0) + continue; + for (cpu = 0; cpu < nr_cpus; cpu++) + stats[idx] += cnts[idx][cpu]; + } +} + +int main(int argc, char **argv) +{ + struct scx_example_dummy *skel; + struct bpf_link *link; + u32 opt; + + signal(SIGINT, sigint_handler); + signal(SIGTERM, sigint_handler); + + libbpf_set_strict_mode(LIBBPF_STRICT_ALL); + + skel = scx_example_dummy__open(); + assert(skel); + + while ((opt = getopt(argc, argv, "h")) != -1) { + switch (opt) { + default: + fprintf(stderr, help_fmt, basename(argv[0])); + return opt != 'h'; + } + } + + assert(!scx_example_dummy__load(skel)); + + link = bpf_map__attach_struct_ops(skel->maps.dummy_ops); + assert(link); + + while (!exit_req && !uei_exited(&skel->bss->uei)) { + u64 stats[2]; + + read_stats(skel, stats); + printf("local=%lu global=%lu\n", stats[0], stats[1]); + fflush(stdout); + sleep(1); + } + + bpf_link__destroy(link); + uei_print(&skel->bss->uei); + scx_example_dummy__destroy(skel); + return 0; +} diff --git a/tools/sched_ext/scx_example_qmap.bpf.c b/tools/sched_ext/scx_example_qmap.bpf.c new file mode 100644 index 000000000000..06a07c834b42 --- /dev/null +++ b/tools/sched_ext/scx_example_qmap.bpf.c @@ -0,0 +1,238 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * A simple five-level FIFO queue scheduler. + * + * There are five FIFOs implemented using BPF_MAP_TYPE_QUEUE. A task gets + * assigned to one depending on its compound weight. Each CPU round robins + * through the FIFOs and dispatches more from FIFOs with higher indices - 1 from + * queue0, 2 from queue1, 4 from queue2 and so on. + * + * This scheduler demonstrates: + * + * - BPF-side queueing using PIDs. + * - Sleepable per-task storage allocation using ops.prep_enable(). + * + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
+ * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#include "scx_common.bpf.h" +#include + +char _license[] SEC("license") = "GPL"; + +const volatile u64 slice_ns = SCX_SLICE_DFL; + +u32 test_error_cnt; + +struct user_exit_info uei; + +struct qmap { + __uint(type, BPF_MAP_TYPE_QUEUE); + __uint(max_entries, 4096); + __type(value, u32); +} queue0 SEC(".maps"), + queue1 SEC(".maps"), + queue2 SEC(".maps"), + queue3 SEC(".maps"), + queue4 SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, 5); + __type(key, int); + __array(values, struct qmap); +} queue_arr SEC(".maps") = { + .values = { + [0] = &queue0, + [1] = &queue1, + [2] = &queue2, + [3] = &queue3, + [4] = &queue4, + }, +}; + +/* Per-task scheduling context */ +struct task_ctx { + bool force_local; /* Dispatch directly to local_dsq */ +}; + +struct { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct task_ctx); +} task_ctx_stor SEC(".maps"); + +/* Per-cpu dispatch index and remaining count */ +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(max_entries, 2); + __type(key, u32); + __type(value, u64); +} dispatch_idx_cnt SEC(".maps"); + +/* Statistics */ +unsigned long nr_enqueued, nr_dispatched, nr_dequeued; + +s32 BPF_STRUCT_OPS(qmap_select_cpu, struct task_struct *p, + s32 prev_cpu, u64 wake_flags) +{ + struct task_ctx *tctx; + s32 cpu; + + tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0); + if (!tctx) { + scx_bpf_error("task_ctx lookup failed"); + return -ESRCH; + } + + if (p->nr_cpus_allowed == 1 || + scx_bpf_test_and_clear_cpu_idle(prev_cpu)) { + tctx->force_local = true; + return prev_cpu; + } + + cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr); + if (cpu >= 0) + return cpu; + + return prev_cpu; +} + +static int weight_to_idx(u32 weight) +{ + /* Coarsely map the compound weight to a FIFO. */ + if (weight <= 25) + return 0; + else if (weight <= 50) + return 1; + else if (weight < 200) + return 2; + else if (weight < 400) + return 3; + else + return 4; +} + +void BPF_STRUCT_OPS(qmap_enqueue, struct task_struct *p, u64 enq_flags) +{ + struct task_ctx *tctx; + u32 pid = p->pid; + int idx = weight_to_idx(p->scx.weight); + void *ring; + + if (test_error_cnt && !--test_error_cnt) + scx_bpf_error("test triggering error"); + + tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0); + if (!tctx) { + scx_bpf_error("task_ctx lookup failed"); + return; + } + + /* Is select_cpu() is telling us to enqueue locally? */ + if (tctx->force_local) { + tctx->force_local = false; + scx_bpf_dispatch(p, SCX_DSQ_LOCAL, slice_ns, enq_flags); + return; + } + + ring = bpf_map_lookup_elem(&queue_arr, &idx); + if (!ring) { + scx_bpf_error("failed to find ring %d", idx); + return; + } + + /* Queue on the selected FIFO. If the FIFO overflows, punt to global. */ + if (bpf_map_push_elem(ring, &pid, 0)) { + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, slice_ns, enq_flags); + return; + } + + __sync_fetch_and_add(&nr_enqueued, 1); +} + +/* + * The BPF queue map doesn't support removal and sched_ext can handle spurious + * dispatches. qmap_dequeue() is only used to collect statistics. 
+ */ +void BPF_STRUCT_OPS(qmap_dequeue, struct task_struct *p, u64 deq_flags) +{ + __sync_fetch_and_add(&nr_dequeued, 1); +} + +void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev) +{ + u32 zero = 0, one = 1; + u64 *idx = bpf_map_lookup_elem(&dispatch_idx_cnt, &zero); + u64 *cnt = bpf_map_lookup_elem(&dispatch_idx_cnt, &one); + void *fifo; + s32 pid; + int i; + + if (!idx || !cnt) { + scx_bpf_error("failed to lookup idx[%p], cnt[%p]", idx, cnt); + return; + } + + for (i = 0; i < 5; i++) { + /* Advance the dispatch cursor and pick the fifo. */ + if (!*cnt) { + *idx = (*idx + 1) % 5; + *cnt = 1 << *idx; + } + (*cnt)--; + + fifo = bpf_map_lookup_elem(&queue_arr, idx); + if (!fifo) { + scx_bpf_error("failed to find ring %llu", *idx); + return; + } + + /* Dispatch or advance. */ + if (!bpf_map_pop_elem(fifo, &pid)) { + struct task_struct *p; + + p = bpf_task_from_pid(pid); + if (p) { + __sync_fetch_and_add(&nr_dispatched, 1); + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, slice_ns, 0); + bpf_task_release(p); + return; + } + } + + *cnt = 0; + } +} + +s32 BPF_STRUCT_OPS(qmap_prep_enable, struct task_struct *p, + struct scx_enable_args *args) +{ + /* + * @p is new. Let's ensure that its task_ctx is available. We can sleep + * in this function and the following will automatically use GFP_KERNEL. + */ + if (bpf_task_storage_get(&task_ctx_stor, p, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE)) + return 0; + else + return -ENOMEM; +} + +void BPF_STRUCT_OPS(qmap_exit, struct scx_exit_info *ei) +{ + uei_record(&uei, ei); +} + +SEC(".struct_ops") +struct sched_ext_ops qmap_ops = { + .select_cpu = (void *)qmap_select_cpu, + .enqueue = (void *)qmap_enqueue, + .dequeue = (void *)qmap_dequeue, + .dispatch = (void *)qmap_dispatch, + .prep_enable = (void *)qmap_prep_enable, + .exit = (void *)qmap_exit, + .name = "qmap", +}; diff --git a/tools/sched_ext/scx_example_qmap.c b/tools/sched_ext/scx_example_qmap.c new file mode 100644 index 000000000000..c6c74641a182 --- /dev/null +++ b/tools/sched_ext/scx_example_qmap.c @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
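Putting weight_to_idx() and the dispatch loop above together gives a rough picture of how differently weighted tasks are treated; the figure of 100 as the weight of a nice-0 task is an assumption about sched_ext's weight scaling rather than something shown in this patch:

	/* weight_to_idx():  25 -> queue0,  50 -> queue1, 100 -> queue2, 250 -> queue3, 800 -> queue4 */
	/* per dispatch cycle (cnt = 1 << idx): queue0 drains up to 1 task, queue1 2, queue2 4, queue3 8, queue4 16 */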
+ * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include "user_exit_info.h" +#include "scx_example_qmap.skel.h" + +const char help_fmt[] = +"A simple five-level FIFO queue sched_ext scheduler.\n" +"\n" +"See the top-level comment in .bpf.c for more details.\n" +"\n" +"Usage: %s [-s SLICE_US] [-e COUNT]\n" +"\n" +" -s SLICE_US Override slice duration\n" +" -e COUNT Trigger scx_bpf_error() after COUNT enqueues\n" +" -h Display this help and exit\n"; + +static volatile int exit_req; + +static void sigint_handler(int dummy) +{ + exit_req = 1; +} + +int main(int argc, char **argv) +{ + struct scx_example_qmap *skel; + struct bpf_link *link; + int opt; + + signal(SIGINT, sigint_handler); + signal(SIGTERM, sigint_handler); + + libbpf_set_strict_mode(LIBBPF_STRICT_ALL); + + skel = scx_example_qmap__open(); + assert(skel); + + while ((opt = getopt(argc, argv, "hs:e:tTd:")) != -1) { + switch (opt) { + case 's': + skel->rodata->slice_ns = strtoull(optarg, NULL, 0) * 1000; + break; + case 'e': + skel->bss->test_error_cnt = strtoull(optarg, NULL, 0); + break; + default: + fprintf(stderr, help_fmt, basename(argv[0])); + return opt != 'h'; + } + } + + assert(!scx_example_qmap__load(skel)); + + link = bpf_map__attach_struct_ops(skel->maps.qmap_ops); + assert(link); + + while (!exit_req && !uei_exited(&skel->bss->uei)) { + long nr_enqueued = skel->bss->nr_enqueued; + long nr_dispatched = skel->bss->nr_dispatched; + + printf("enq=%lu, dsp=%lu, delta=%ld, deq=%lu\n", + nr_enqueued, nr_dispatched, nr_enqueued - nr_dispatched, + skel->bss->nr_dequeued); + fflush(stdout); + sleep(1); + } + + bpf_link__destroy(link); + uei_print(&skel->bss->uei); + scx_example_qmap__destroy(skel); + return 0; +} diff --git a/tools/sched_ext/user_exit_info.h b/tools/sched_ext/user_exit_info.h new file mode 100644 index 000000000000..e701ef0e0b86 --- /dev/null +++ b/tools/sched_ext/user_exit_info.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Define struct user_exit_info which is shared between BPF and userspace parts + * to communicate exit status and other information. + * + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
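One detail worth spelling out from the option parsing above: -s takes microseconds and is multiplied by 1000 before being written into the BPF side's slice_ns, so for example:

	/* ./scx_example_qmap -s 20000  =>  slice_ns = 20000 * 1000 = 20,000,000 ns, i.e. a 20ms slice */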
+ * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#ifndef __USER_EXIT_INFO_H +#define __USER_EXIT_INFO_H + +struct user_exit_info { + int type; + char reason[128]; + char msg[1024]; +}; + +#ifdef __bpf__ + +#include "vmlinux.h" +#include + +static inline void uei_record(struct user_exit_info *uei, + const struct scx_exit_info *ei) +{ + bpf_probe_read_kernel_str(uei->reason, sizeof(uei->reason), ei->reason); + bpf_probe_read_kernel_str(uei->msg, sizeof(uei->msg), ei->msg); + /* use __sync to force memory barrier */ + __sync_val_compare_and_swap(&uei->type, uei->type, ei->type); +} + +#else /* !__bpf__ */ + +static inline bool uei_exited(struct user_exit_info *uei) +{ + /* use __sync to force memory barrier */ + return __sync_val_compare_and_swap(&uei->type, -1, -1); +} + +static inline void uei_print(const struct user_exit_info *uei) +{ + fprintf(stderr, "EXIT: %s", uei->reason); + if (uei->msg[0] != '\0') + fprintf(stderr, " (%s)", uei->msg); + fputs("\n", stderr); +} + +#endif /* __bpf__ */ +#endif /* __USER_EXIT_INFO_H */ From patchwork Sat Jan 28 00:16:24 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119568 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 7D1FAC38142 for ; Sat, 28 Jan 2023 00:18:28 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S232786AbjA1AS0 (ORCPT ); Fri, 27 Jan 2023 19:18:26 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:36740 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232778AbjA1AR5 (ORCPT ); Fri, 27 Jan 2023 19:17:57 -0500 Received: from mail-pl1-x62f.google.com (mail-pl1-x62f.google.com [IPv6:2607:f8b0:4864:20::62f]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id D851C8CC4A; Fri, 27 Jan 2023 16:17:20 -0800 (PST) Received: by mail-pl1-x62f.google.com with SMTP id be8so6561006plb.7; Fri, 27 Jan 2023 16:17:20 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=wECB+kv9zmbuEsGDrXMjNkxOFX+6xuQqOVDwucXIlds=; b=jNoXXkGxSM+6YWVkc9Dt3d7uFesca5+fVGhkhaDuDvKXiOvs7lOj6qfm2IxpYO1pGl iRXines4P7kAb6S7AewwaZPmyo5N3/AIdGxbLulvd1oJQQoVc2cAfs3QN8Eo3Oh+UHus bGEaCRKONAEFKN1jBnDypZmzzhCnsPsU3RfNKJdHgpapKL+3R9bmZmd2gh4vzxKjQb41 5og991aR31OxVY/DOL0iVdiFetUSoL7oC6opdXUNgfRp3Ji4yvMVIc5O4b0NPkipNPSb pg/EOiVs7LC2QfuYgfK9SLJSU2vJ9XPAQU+YBAiFOHSjYbpG1KrC/7WXAwJy8RB3CDdz HD3Q== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=wECB+kv9zmbuEsGDrXMjNkxOFX+6xuQqOVDwucXIlds=; b=4EzHcKlCKvXqkKS2OuHNTlO9zuSlO++9LZ9UtyRY7CFSzHntJl2CepOuppq+67gksR 8OdzTCWFsAV1yn3ytzcmykl0O8z1KE0DlWgOFH7HgstWDMjE7xxZ2+kL73GRLjR6ycE7 SsQwW+LsseiMy6LMT39bjb2A+2ONeX4UAPe1PJ6UYk2N/GbGpvw8QCNi1PGWmuQfUA56 hqGZngB2Yt19vk7VuFJAcNMWSAqifc+svfOiciSe9BY4U587zTnwO/2mcPvkw9dsfYoy kKGgxUDn6n26R8kClmwtJpfO5eUz8cWfZl9ap+5Ukjp+FRaPi6ZPwaR2iTwWsjo92DwA 81eg== X-Gm-Message-State: 
AFqh2koVUPowjovX6lsceh/ZQLCGlNeQ0mpWTdYtlatJb2e4b5OuOF/H a/l7mLVo+5OC7PpbqN+vTd8= X-Google-Smtp-Source: AMrXdXsNaHzO+J5iBJWnXXCJfT+FiSmHPvvbQwE3Elh1mIzfrn+xJrCkPJKQtNwFS/yAjY+yoObe9g== X-Received: by 2002:a05:6a20:8f0f:b0:b8:5515:9004 with SMTP id b15-20020a056a208f0f00b000b855159004mr55584650pzk.8.1674865039784; Fri, 27 Jan 2023 16:17:19 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id q13-20020a17090a304d00b0022908f1398dsm3270995pjl.32.2023.01.27.16.17.19 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:19 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 15/30] sched_ext: Add sysrq-S which disables the BPF scheduler Date: Fri, 27 Jan 2023 14:16:24 -1000 Message-Id: <20230128001639.3510083-16-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org This enables the admin to abort the BPF scheduler and revert to CFS anytime. Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- drivers/tty/sysrq.c | 1 + include/linux/sched/ext.h | 1 + kernel/sched/build_policy.c | 1 + kernel/sched/ext.c | 20 ++++++++++++++++++++ 4 files changed, 23 insertions(+) diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index b6e70c5cfa17..ddfcdb6aecd7 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -520,6 +520,7 @@ static const struct sysrq_key_op *sysrq_key_table[62] = { NULL, /* P */ NULL, /* Q */ NULL, /* R */ + /* S: May be registered by sched_ext for resetting */ NULL, /* S */ NULL, /* T */ NULL, /* U */ diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 8894b6a9977d..988d1e30e26c 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -55,6 +55,7 @@ enum scx_exit_type { SCX_EXIT_DONE, SCX_EXIT_UNREG = 64, /* BPF unregistration */ + SCX_EXIT_SYSRQ, /* requested by 'S' sysrq */ SCX_EXIT_ERROR = 1024, /* runtime error, error msg contains details */ SCX_EXIT_ERROR_BPF, /* ERROR but triggered through scx_bpf_error() */ diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c index 4c658b21f603..005025f55bea 100644 --- a/kernel/sched/build_policy.c +++ b/kernel/sched/build_policy.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 38786d36a356..ebbf3f3c1cf7 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -1881,6 +1881,9 @@ static void scx_ops_disable_workfn(struct kthread_work *work) case SCX_EXIT_UNREG: reason = "BPF scheduler unregistered"; break; + case SCX_EXIT_SYSRQ: + reason = "disabled by sysrq-S"; + break; case SCX_EXIT_ERROR: reason = "runtime error"; break; 
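As a usage note: with a BPF scheduler attached, the new key can be exercised as root with

    # echo S > /proc/sysrq-trigger

which invokes the handler added below, calling scx_ops_disable(SCX_EXIT_SYSRQ) and reverting all tasks to CFS; keyboard-initiated SysRq additionally honors the bitmask in /proc/sys/kernel/sysrq, where this handler registers under SYSRQ_ENABLE_RTNICE.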
@@ -2490,6 +2493,21 @@ struct bpf_struct_ops bpf_sched_ext_ops = { .name = "sched_ext_ops", }; +static void sysrq_handle_sched_ext_reset(int key) +{ + if (scx_ops_helper) + scx_ops_disable(SCX_EXIT_SYSRQ); + else + pr_info("sched_ext: BPF scheduler not yet used\n"); +} + +static const struct sysrq_key_op sysrq_sched_ext_reset_op = { + .handler = sysrq_handle_sched_ext_reset, + .help_msg = "reset-sched-ext(S)", + .action_msg = "Disable sched_ext and revert all tasks to CFS", + .enable_mask = SYSRQ_ENABLE_RTNICE, +}; + void __init init_sched_ext_class(void) { int cpu; @@ -2513,6 +2531,8 @@ void __init init_sched_ext_class(void) init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL); } + + register_sysrq_key('S', &sysrq_sched_ext_reset_op); } From patchwork Sat Jan 28 00:16:25 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119571 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id A1343C38142 for ; Sat, 28 Jan 2023 00:19:11 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S232910AbjA1ATK (ORCPT ); Fri, 27 Jan 2023 19:19:10 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:36768 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S231599AbjA1ASF (ORCPT ); Fri, 27 Jan 2023 19:18:05 -0500 Received: from mail-pl1-x634.google.com (mail-pl1-x634.google.com [IPv6:2607:f8b0:4864:20::634]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 74BD87CCB2; Fri, 27 Jan 2023 16:17:29 -0800 (PST) Received: by mail-pl1-x634.google.com with SMTP id v23so6592297plo.1; Fri, 27 Jan 2023 16:17:29 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=ShvHxKSvwBj/EM2eUFVCPEUd79aoyWNar7Xjs0gX+VI=; b=MuYrxfRzqS0pNVRRpV6OUWON+D10yphHLf19wJqYr5KSpGjYaD7O5cprW7ajyjeVFN tQMacVNywpwmpbKLCji1IQCJRex1Uiw+naRWsuC5f0y9L+/XmCI6c0nqWWZ/hCNeAWgM zUuWxB89+17IGS7ZIdAc0f/+xO7uJywuvecWmIxBEbAmMUZyzTwLZTYjP1RFVj0gOJRW kb/XnhGlEo40cwnk3CTmGIsFswRlXuUX4Ud9hdFYXSANljlEDEDbpKSq2GcA0V2EZoth d9/GQ4TCQJtTSSmDMwWO/jJ/YGZTS34995+yMepDcjFiCdv8IYfmvJp+AI1smh9FdJTI snug== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=ShvHxKSvwBj/EM2eUFVCPEUd79aoyWNar7Xjs0gX+VI=; b=279AxleBl9TwTPs64vh2ql5Y8Rm8ZxRs5tHihwBAkKtkEpydMjEWslAkIfbbKxqQBI RW0MKVWWez6QdjvAVVq9uryEe8hnttSX4mU7viQ9p0jVriDJDSVot0JyDwjGwx+EyIzO tkgYD1EUayBvXzGeSt4Se9dl7vJu+M9etbYdDtKMTmdUw33V2qwbytE4QgZ0egNrUn51 GqsnO0w2cti8hC6DHOd5fG4rTonvKVmZD1XAi1C+aodC1bfRbIQKHbHU9ls9g8oU0SF0 qnBaYzh+kgVo1XnT/A+G59Il80czHnAEZKYnh9J6uqgNf/TGojUfaqXgcMTc4H4uSxVI ZL+Q== X-Gm-Message-State: AFqh2krSChEI+f3lHijewj6xxLVxOUwHr8ZcXsA0REvYZublukMW5wg8 zNEfzYOsyRk+F3QJZM8nQy+3aOk/GBw= X-Google-Smtp-Source: AMrXdXuNjRdTROrVaUoV6b/cdf05Ut3DAhXyG5LeIlrQ1jLVvddFEMs+Jyyr4yO4XJrE0DYohpIpzg== X-Received: by 2002:a17:902:b213:b0:189:e3d0:daf8 with SMTP id t19-20020a170902b21300b00189e3d0daf8mr39278315plr.55.1674865041676; Fri, 27 Jan 2023 16:17:21 -0800 (PST) Received: 
from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id k14-20020a170902ce0e00b0018c990ce7fesm3378804plg.239.2023.01.27.16.17.20 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:21 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo , Julia Lawall Subject: [PATCH 16/30] sched_ext: Implement runnable task stall watchdog Date: Fri, 27 Jan 2023 14:16:25 -1000 Message-Id: <20230128001639.3510083-17-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org From: David Vernet The most common and critical way that a BPF scheduler can misbehave is by failing to run runnable tasks for too long. This patch implements a watchdog. * All tasks record when they become runnable. * A watchdog work periodically scans all runnable tasks. If any task has stayed runnable for too long, the BPF scheduler is aborted. * scheduler_tick() monitors whether the watchdog itself is stuck. If so, the BPF scheduler is aborted. Because the watchdog only scans the tasks which are currently runnable and usually very infrequently, the overhead should be negligible. scx_example_qmap is updated so that it can be told to stall user and/or kernel tasks. A detected task stall looks like the following: sched_ext: BPF scheduler "qmap" errored, disabling sched_ext: runnable task stall (dbus-daemon[953] failed to run for 6.478s) scx_check_timeout_workfn+0x10e/0x1b0 process_one_work+0x287/0x560 worker_thread+0x234/0x420 kthread+0xe9/0x100 ret_from_fork+0x1f/0x30 A detected watchdog stall: sched_ext: BPF scheduler "qmap" errored, disabling sched_ext: runnable task stall (watchdog failed to check in for 5.001s) scheduler_tick+0x2eb/0x340 update_process_times+0x7a/0x90 tick_sched_timer+0xd8/0x130 __hrtimer_run_queues+0x178/0x3b0 hrtimer_interrupt+0xfc/0x390 __sysvec_apic_timer_interrupt+0xb7/0x2b0 sysvec_apic_timer_interrupt+0x90/0xb0 asm_sysvec_apic_timer_interrupt+0x1b/0x20 default_idle+0x14/0x20 arch_cpu_idle+0xf/0x20 default_idle_call+0x50/0x90 do_idle+0xe8/0x240 cpu_startup_entry+0x1d/0x20 kernel_init+0x0/0x190 start_kernel+0x0/0x392 start_kernel+0x324/0x392 x86_64_start_reservations+0x2a/0x2c x86_64_start_kernel+0x104/0x109 secondary_startup_64_no_verify+0xce/0xdb Note that this patch exposes scx_ops_error[_type]() in kernel/sched/ext.h to inline scx_notify_sched_tick(). v2: Julia Lawall noticed that the watchdog code was mixing msecs and jiffies. Fix by using jiffies for everything. 
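Concretely, a scheduler opts in to a tighter stall limit by setting timeout_ms in its ops struct, capped at the 30 second default; the scx_example_qmap update at the end of this patch does exactly this with 5000ms, and its new -t/-T options are what can be used to provoke stall dumps like the ones quoted above. A minimal sketch of the shape, with illustrative names:

	SEC(".struct_ops")
	struct sched_ext_ops demo_ops = {
		.timeout_ms	= 5000U,	/* abort the BPF scheduler if a runnable task waits > 5s */
		.name		= "demo",
	};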
Signed-off-by: David Vernet Reviewed-by: Tejun Heo Signed-off-by: Tejun Heo Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden Cc: Julia Lawall --- include/linux/sched/ext.h | 13 +++ init/init_task.c | 2 + kernel/sched/core.c | 3 + kernel/sched/ext.c | 128 +++++++++++++++++++++++-- kernel/sched/ext.h | 25 +++++ kernel/sched/sched.h | 1 + tools/sched_ext/scx_example_qmap.bpf.c | 12 +++ tools/sched_ext/scx_example_qmap.c | 14 ++- 8 files changed, 186 insertions(+), 12 deletions(-) diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 988d1e30e26c..474a8c0a0b12 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -59,6 +59,7 @@ enum scx_exit_type { SCX_EXIT_ERROR = 1024, /* runtime error, error msg contains details */ SCX_EXIT_ERROR_BPF, /* ERROR but triggered through scx_bpf_error() */ + SCX_EXIT_ERROR_STALL, /* watchdog detected stalled runnable tasks */ }; /* @@ -307,6 +308,15 @@ struct sched_ext_ops { */ u64 flags; + /** + * timeout_ms - The maximum amount of time, in milliseconds, that a + * runnable task should be able to wait before being scheduled. The + * maximum timeout may not exceed the default timeout of 30 seconds. + * + * Defaults to the maximum allowed timeout value of 30 seconds. + */ + u32 timeout_ms; + /** * name - BPF scheduler's name * @@ -340,6 +350,7 @@ enum scx_ent_flags { SCX_TASK_OPS_PREPPED = 1 << 3, /* prepared for BPF scheduler enable */ SCX_TASK_OPS_ENABLED = 1 << 4, /* task has BPF scheduler enabled */ + SCX_TASK_WATCHDOG_RESET = 1 << 5, /* task watchdog counter should be reset */ SCX_TASK_DEQD_FOR_SLEEP = 1 << 6, /* last dequeue was for SLEEP */ SCX_TASK_CURSOR = 1 << 7, /* iteration cursor, not a task */ @@ -369,12 +380,14 @@ enum scx_kf_mask { struct sched_ext_entity { struct scx_dispatch_q *dsq; struct list_head dsq_node; + struct list_head watchdog_node; u32 flags; /* protected by rq lock */ u32 weight; s32 sticky_cpu; s32 holding_cpu; u32 kf_mask; /* see scx_kf_mask above */ atomic64_t ops_state; + unsigned long runnable_at; /* BPF scheduler modifiable fields */ diff --git a/init/init_task.c b/init/init_task.c index bdbc663107bf..913194aab623 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -106,9 +106,11 @@ struct task_struct init_task #ifdef CONFIG_SCHED_CLASS_EXT .scx = { .dsq_node = LIST_HEAD_INIT(init_task.scx.dsq_node), + .watchdog_node = LIST_HEAD_INIT(init_task.scx.watchdog_node), .sticky_cpu = -1, .holding_cpu = -1, .ops_state = ATOMIC_INIT(0), + .runnable_at = INITIAL_JIFFIES, .slice = SCX_SLICE_DFL, }, #endif diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 804aa291b837..3f177c161c1e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4424,12 +4424,14 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) #ifdef CONFIG_SCHED_CLASS_EXT p->scx.dsq = NULL; INIT_LIST_HEAD(&p->scx.dsq_node); + INIT_LIST_HEAD(&p->scx.watchdog_node); p->scx.flags = 0; p->scx.weight = 0; p->scx.sticky_cpu = -1; p->scx.holding_cpu = -1; p->scx.kf_mask = 0; atomic64_set(&p->scx.ops_state, 0); + p->scx.runnable_at = INITIAL_JIFFIES; p->scx.slice = SCX_SLICE_DFL; #endif @@ -5584,6 +5586,7 @@ void scheduler_tick(void) if (sched_feat(LATENCY_WARN) && resched_latency) resched_latency_warn(cpu, resched_latency); + scx_notify_sched_tick(); perf_event_task_tick(); #ifdef CONFIG_SMP diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index ebbf3f3c1cf7..1af74ea8ed42 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -9,6 +9,7 @@ enum scx_internal_consts { 
SCX_NR_ONLINE_OPS = SCX_OP_IDX(init), SCX_DSP_DFL_MAX_BATCH = 32, + SCX_WATCHDOG_MAX_TIMEOUT = 30 * HZ, }; enum scx_ops_enable_state { @@ -87,6 +88,23 @@ static struct scx_exit_info scx_exit_info; static atomic64_t scx_nr_rejected = ATOMIC64_INIT(0); +/* + * The maximum amount of time in jiffies that a task may be runnable without + * being scheduled on a CPU. If this timeout is exceeded, it will trigger + * scx_ops_error(). + */ +unsigned long scx_watchdog_timeout; + +/* + * The last time the delayed work was run. This delayed work relies on + * ksoftirqd being able to run to service timer interrupts, so it's possible + * that this work itself could get wedged. To account for this, we check that + * it's not stalled in the timer tick, and trigger an error if it is. + */ +unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES; + +static struct delayed_work scx_watchdog_work; + /* idle tracking */ #ifdef CONFIG_SMP #ifdef CONFIG_CPUMASK_OFFSTACK @@ -146,10 +164,6 @@ static DEFINE_PER_CPU(struct scx_dsp_ctx, scx_dsp_ctx); void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags); -__printf(2, 3) static void scx_ops_error_type(enum scx_exit_type type, - const char *fmt, ...); -#define scx_ops_error(fmt, args...) \ - scx_ops_error_type(SCX_EXIT_ERROR, fmt, ##args) struct scx_task_iter { struct sched_ext_entity cursor; @@ -668,6 +682,27 @@ static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags, dispatch_enqueue(&scx_dsq_global, p, enq_flags); } +static bool watchdog_task_watched(const struct task_struct *p) +{ + return !list_empty(&p->scx.watchdog_node); +} + +static void watchdog_watch_task(struct rq *rq, struct task_struct *p) +{ + lockdep_assert_rq_held(rq); + if (p->scx.flags & SCX_TASK_WATCHDOG_RESET) + p->scx.runnable_at = jiffies; + p->scx.flags &= ~SCX_TASK_WATCHDOG_RESET; + list_add_tail(&p->scx.watchdog_node, &rq->scx.watchdog_list); +} + +static void watchdog_unwatch_task(struct task_struct *p, bool reset_timeout) +{ + list_del_init(&p->scx.watchdog_node); + if (reset_timeout) + p->scx.flags |= SCX_TASK_WATCHDOG_RESET; +} + static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags) { int sticky_cpu = p->scx.sticky_cpu; @@ -684,9 +719,12 @@ static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags if (unlikely(enq_flags & ENQUEUE_RESTORE) && task_current(rq, p)) sticky_cpu = cpu_of(rq); - if (p->scx.flags & SCX_TASK_QUEUED) + if (p->scx.flags & SCX_TASK_QUEUED) { + WARN_ON_ONCE(!watchdog_task_watched(p)); return; + } + watchdog_watch_task(rq, p); p->scx.flags |= SCX_TASK_QUEUED; rq->scx.nr_running++; add_nr_running(rq, 1); @@ -698,6 +736,8 @@ static void ops_dequeue(struct task_struct *p, u64 deq_flags) { u64 opss; + watchdog_unwatch_task(p, false); + /* acquire ensures that we see the preceding updates on QUEUED */ opss = atomic64_read_acquire(&p->scx.ops_state); @@ -742,8 +782,10 @@ static void dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags { struct scx_rq *scx_rq = &rq->scx; - if (!(p->scx.flags & SCX_TASK_QUEUED)) + if (!(p->scx.flags & SCX_TASK_QUEUED)) { + WARN_ON_ONCE(watchdog_task_watched(p)); return; + } ops_dequeue(p, deq_flags); @@ -1256,6 +1298,8 @@ static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first) } p->se.exec_start = rq_clock_task(rq); + + watchdog_unwatch_task(p, true); } static void put_prev_task_scx(struct rq *rq, struct task_struct *p) @@ -1299,11 +1343,14 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct 
*p) */ if (p->scx.flags & SCX_TASK_BAL_KEEP) { p->scx.flags &= ~SCX_TASK_BAL_KEEP; + watchdog_watch_task(rq, p); dispatch_enqueue(&rq->scx.local_dsq, p, SCX_ENQ_HEAD); return; } if (p->scx.flags & SCX_TASK_QUEUED) { + watchdog_watch_task(rq, p); + /* * If @p has slice left and balance_scx() didn't tag it for * keeping, @p is getting preempted by a higher priority @@ -1530,6 +1577,49 @@ static void reset_idle_masks(void) {} #endif /* CONFIG_SMP */ +static bool check_rq_for_timeouts(struct rq *rq) +{ + struct task_struct *p; + struct rq_flags rf; + bool timed_out = false; + + rq_lock_irqsave(rq, &rf); + list_for_each_entry(p, &rq->scx.watchdog_list, scx.watchdog_node) { + unsigned long last_runnable = p->scx.runnable_at; + + if (unlikely(time_after(jiffies, + last_runnable + scx_watchdog_timeout))) { + u32 dur_ms = jiffies_to_msecs(jiffies - last_runnable); + + scx_ops_error_type(SCX_EXIT_ERROR_STALL, + "%s[%d] failed to run for %u.%03us", + p->comm, p->pid, + dur_ms / 1000, dur_ms % 1000); + timed_out = true; + break; + } + } + rq_unlock_irqrestore(rq, &rf); + + return timed_out; +} + +static void scx_watchdog_workfn(struct work_struct *work) +{ + int cpu; + + scx_watchdog_timestamp = jiffies; + + for_each_online_cpu(cpu) { + if (unlikely(check_rq_for_timeouts(cpu_rq(cpu)))) + break; + + cond_resched(); + } + queue_delayed_work(system_unbound_wq, to_delayed_work(work), + scx_watchdog_timeout / 2); +} + static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued) { update_curr_scx(rq); @@ -1561,7 +1651,7 @@ static int scx_ops_prepare_task(struct task_struct *p, struct task_group *tg) } } - p->scx.flags |= SCX_TASK_OPS_PREPPED; + p->scx.flags |= (SCX_TASK_OPS_PREPPED | SCX_TASK_WATCHDOG_RESET); return 0; } @@ -1877,6 +1967,8 @@ static void scx_ops_disable_workfn(struct kthread_work *work) break; } + cancel_delayed_work_sync(&scx_watchdog_work); + switch (type) { case SCX_EXIT_UNREG: reason = "BPF scheduler unregistered"; @@ -1890,6 +1982,9 @@ static void scx_ops_disable_workfn(struct kthread_work *work) case SCX_EXIT_ERROR_BPF: reason = "scx_bpf_error"; break; + case SCX_EXIT_ERROR_STALL: + reason = "runnable task stall"; + break; default: reason = ""; } @@ -2074,8 +2169,8 @@ static void scx_ops_error_irq_workfn(struct irq_work *irq_work) static DEFINE_IRQ_WORK(scx_ops_error_irq_work, scx_ops_error_irq_workfn); -__printf(2, 3) static void scx_ops_error_type(enum scx_exit_type type, - const char *fmt, ...) +__printf(2, 3) void scx_ops_error_type(enum scx_exit_type type, + const char *fmt, ...) { struct scx_exit_info *ei = &scx_exit_info; int none = SCX_EXIT_NONE; @@ -2174,6 +2269,14 @@ static int scx_ops_enable(struct sched_ext_ops *ops) goto err_disable; } + scx_watchdog_timeout = SCX_WATCHDOG_MAX_TIMEOUT; + if (ops->timeout_ms) + scx_watchdog_timeout = msecs_to_jiffies(ops->timeout_ms); + + scx_watchdog_timestamp = jiffies; + queue_delayed_work(system_unbound_wq, &scx_watchdog_work, + scx_watchdog_timeout / 2); + /* * Lock out forks before opening the floodgate so that they don't wander * into the operations prematurely. 
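A quick arithmetic consequence of the code above: the watchdog work re-queues itself at half the timeout, so with the 30s default it scans the per-CPU runnable lists every 15s, and with a 5000ms timeout_ms every 2.5s; in the worst case a stall is therefore noticed roughly one and a half timeouts after the task became runnable, while scx_notify_sched_tick() in the tick path separately flags the case where the watchdog work itself cannot run.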
@@ -2433,6 +2536,11 @@ static int bpf_scx_init_member(const struct btf_type *t, if (ret == 0) return -EINVAL; return 1; + case offsetof(struct sched_ext_ops, timeout_ms): + if (*(u32 *)(udata + moff) > SCX_WATCHDOG_MAX_TIMEOUT) + return -E2BIG; + ops->timeout_ms = *(u32 *)(udata + moff); + return 1; } return 0; @@ -2530,9 +2638,11 @@ void __init init_sched_ext_class(void) struct rq *rq = cpu_rq(cpu); init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL); + INIT_LIST_HEAD(&rq->scx.watchdog_list); } register_sysrq_key('S', &sysrq_sched_ext_reset_op); + INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn); } diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index f8d5682deacf..7dfa7b888487 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -56,6 +56,8 @@ enum scx_deq_flags { extern const struct sched_class ext_sched_class; extern const struct bpf_verifier_ops bpf_sched_ext_verifier_ops; extern const struct file_operations sched_ext_fops; +extern unsigned long scx_watchdog_timeout; +extern unsigned long scx_watchdog_timestamp; DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled); #define scx_enabled() static_branch_unlikely(&__scx_ops_enabled) @@ -67,6 +69,28 @@ void scx_post_fork(struct task_struct *p); void scx_cancel_fork(struct task_struct *p); void init_sched_ext_class(void); +__printf(2, 3) void scx_ops_error_type(enum scx_exit_type type, + const char *fmt, ...); +#define scx_ops_error(fmt, args...) \ + scx_ops_error_type(SCX_EXIT_ERROR, fmt, ##args) + +static inline void scx_notify_sched_tick(void) +{ + unsigned long last_check; + + if (!scx_enabled()) + return; + + last_check = scx_watchdog_timestamp; + if (unlikely(time_after(jiffies, last_check + scx_watchdog_timeout))) { + u32 dur_ms = jiffies_to_msecs(jiffies - last_check); + + scx_ops_error_type(SCX_EXIT_ERROR_STALL, + "watchdog failed to check in for %u.%03us", + dur_ms / 1000, dur_ms % 1000); + } +} + static inline const struct sched_class *next_active_class(const struct sched_class *class) { class++; @@ -98,6 +122,7 @@ static inline int scx_fork(struct task_struct *p) { return 0; } static inline void scx_post_fork(struct task_struct *p) {} static inline void scx_cancel_fork(struct task_struct *p) {} static inline void init_sched_ext_class(void) {} +static inline void scx_notify_sched_tick(void) {} #define for_each_active_class for_each_class #define for_balance_class_range for_class_range diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 7778f4ce6b5b..112c2f127c95 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -687,6 +687,7 @@ struct cfs_rq { #ifdef CONFIG_SCHED_CLASS_EXT struct scx_rq { struct scx_dispatch_q local_dsq; + struct list_head watchdog_list; u64 ops_qseq; u32 nr_running; }; diff --git a/tools/sched_ext/scx_example_qmap.bpf.c b/tools/sched_ext/scx_example_qmap.bpf.c index 06a07c834b42..b22b8d82846e 100644 --- a/tools/sched_ext/scx_example_qmap.bpf.c +++ b/tools/sched_ext/scx_example_qmap.bpf.c @@ -22,6 +22,8 @@ char _license[] SEC("license") = "GPL"; const volatile u64 slice_ns = SCX_SLICE_DFL; +const volatile u32 stall_user_nth; +const volatile u32 stall_kernel_nth; u32 test_error_cnt; @@ -117,11 +119,20 @@ static int weight_to_idx(u32 weight) void BPF_STRUCT_OPS(qmap_enqueue, struct task_struct *p, u64 enq_flags) { + static u32 user_cnt, kernel_cnt; struct task_ctx *tctx; u32 pid = p->pid; int idx = weight_to_idx(p->scx.weight); void *ring; + if (p->flags & PF_KTHREAD) { + if (stall_kernel_nth && !(++kernel_cnt % stall_kernel_nth)) + return; + } else { + if (stall_user_nth && 
!(++user_cnt % stall_user_nth)) + return; + } + if (test_error_cnt && !--test_error_cnt) scx_bpf_error("test triggering error"); @@ -234,5 +245,6 @@ struct sched_ext_ops qmap_ops = { .dispatch = (void *)qmap_dispatch, .prep_enable = (void *)qmap_prep_enable, .exit = (void *)qmap_exit, + .timeout_ms = 5000U, .name = "qmap", }; diff --git a/tools/sched_ext/scx_example_qmap.c b/tools/sched_ext/scx_example_qmap.c index c6c74641a182..dd490a146b1a 100644 --- a/tools/sched_ext/scx_example_qmap.c +++ b/tools/sched_ext/scx_example_qmap.c @@ -20,10 +20,12 @@ const char help_fmt[] = "\n" "See the top-level comment in .bpf.c for more details.\n" "\n" -"Usage: %s [-s SLICE_US] [-e COUNT]\n" +"Usage: %s [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT]\n" "\n" " -s SLICE_US Override slice duration\n" " -e COUNT Trigger scx_bpf_error() after COUNT enqueues\n" +" -t COUNT Stall every COUNT'th user thread\n" +" -T COUNT Stall every COUNT'th kernel thread\n" " -h Display this help and exit\n"; static volatile int exit_req; @@ -47,13 +49,19 @@ int main(int argc, char **argv) skel = scx_example_qmap__open(); assert(skel); - while ((opt = getopt(argc, argv, "hs:e:tTd:")) != -1) { + while ((opt = getopt(argc, argv, "hs:e:t:T:d:")) != -1) { switch (opt) { case 's': skel->rodata->slice_ns = strtoull(optarg, NULL, 0) * 1000; break; case 'e': - skel->bss->test_error_cnt = strtoull(optarg, NULL, 0); + skel->bss->test_error_cnt = strtoul(optarg, NULL, 0); + break; + case 't': + skel->rodata->stall_user_nth = strtoul(optarg, NULL, 0); + break; + case 'T': + skel->rodata->stall_kernel_nth = strtoul(optarg, NULL, 0); break; default: fprintf(stderr, help_fmt, basename(argv[0])); From patchwork Sat Jan 28 00:16:26 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119572 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id B3B91C61DA7 for ; Sat, 28 Jan 2023 00:19:13 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S233058AbjA1ATM (ORCPT ); Fri, 27 Jan 2023 19:19:12 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:36802 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232917AbjA1ASP (ORCPT ); Fri, 27 Jan 2023 19:18:15 -0500 Received: from mail-pg1-x529.google.com (mail-pg1-x529.google.com [IPv6:2607:f8b0:4864:20::529]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 328858CE11; Fri, 27 Jan 2023 16:17:36 -0800 (PST) Received: by mail-pg1-x529.google.com with SMTP id s67so4252069pgs.3; Fri, 27 Jan 2023 16:17:36 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=XQBinvOSNBUqRTgAcPFdrMmOx4mlJq+YJiGICJK5s04=; b=hrk6PmOAd+boS58LO0yHar4BZT/Vq04lMGrOT+WoUkHmZaEj0rYYfPNngl2apE6BoN /MXgGn2/IG7k/FPPkfOgBMDT3G/g7Cv1gi05DMTrXVO8kk7FcsdNosLDeuGMs5upkSVw oFi8WqC+vIgGQzjag+z2dIZeQ9EBQok+ShPw9zASQPcy063CRJ+irp+GzbY4Y5MFeTSv 1Is8pAF5nqxICKq6RcGs5v2TQYgSJFVjZCkqNdNpSxheW+6O2sqDfDjy+lF/I5pRW/ha d3CsyEH9FN+uPjSy9XYlN1BkCzaKUEFgISn13mcvZn6bGzZdwA9+3RE+Lk2fX4kdIMV7 AzFQ== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; 
h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=XQBinvOSNBUqRTgAcPFdrMmOx4mlJq+YJiGICJK5s04=; b=DWOaVfcYuAgMJEhn1G3YrDjpenJBDk/lxJhn5Na52x/nlBuIpjdUq3s+Ye68H/GT2a hIIfZnwG1KVn/lfnwXKMWBI0gDC5TiGu7PFeizyuH8oSd8hK2QL8ok78zdI+cTCFjs7C g8hJBY+ffG/VurwY13F6CteZ3jDQWLs7YVyVZGTdjSbCVSwH5ZiCzTOLa+J8SdoxX2qr A820Vq/YIc6HGSJW0olTYXIsmQ5W/feICn8WA7hF9qZiQDCbhj9HdFZSsjLs5SR4G21/ +1EeU1ae8sN13aVqDAoqAzfmyKazW6ALuFwAQn4Hqtw914its3N2eDmj0/WWpqea3rkI guEw== X-Gm-Message-State: AO0yUKU81R03TYUpq6ksyrfCdbGTyzXW2N58Uv6zfAjo6p2LS665cVaS w1sRY3GkhdH7tcQw6qoCPrg= X-Google-Smtp-Source: AK7set+tOVBV6u1YucVMwXfpmAXpleL/7VSTvnPsUqG6hXh8OLaqRTJ6c8Y5AMpjU0JjzJm1QGDobQ== X-Received: by 2002:aa7:82c5:0:b0:593:6e2f:b859 with SMTP id f5-20020aa782c5000000b005936e2fb859mr1661708pfn.4.1674865043430; Fri, 27 Jan 2023 16:17:23 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id h28-20020a056a00001c00b0059071156016sm3170691pfk.87.2023.01.27.16.17.22 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:23 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 17/30] sched_ext: Allow BPF schedulers to disallow specific tasks from joining SCHED_EXT Date: Fri, 27 Jan 2023 14:16:26 -1000 Message-Id: <20230128001639.3510083-18-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org BPF schedulers might not want to schedule certain tasks - e.g. kernel threads. This patch adds p->scx.disallow which can be set by BPF schedulers in such cases. The field can be changed anytime and setting it in ops.prep_enable() guarantees that the task can never be scheduled by sched_ext. scx_example_qmap is updated with the -d option to disallow a specific PID: # echo $$ 1092 # egrep '(policy)|(ext\.enabled)' /proc/self/sched policy : 0 ext.enabled : 0 # ./set-scx 1092 # egrep '(policy)|(ext\.enabled)' /proc/self/sched policy : 7 ext.enabled : 0 Run "scx_example_qmap -d 1092" in another terminal. 
# grep rejected /sys/kernel/debug/sched/ext nr_rejected : 1 # egrep '(policy)|(ext\.enabled)' /proc/self/sched policy : 0 ext.enabled : 0 # ./set-scx 1092 setparam failed for 1092 (Permission denied) Signed-off-by: Tejun Heo Suggested-by: Barret Rhoden Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- include/linux/sched/ext.h | 12 ++++++++ kernel/sched/core.c | 4 +++ kernel/sched/ext.c | 38 ++++++++++++++++++++++++++ kernel/sched/ext.h | 3 ++ tools/sched_ext/scx_example_qmap.bpf.c | 4 +++ tools/sched_ext/scx_example_qmap.c | 8 +++++- 6 files changed, 68 insertions(+), 1 deletion(-) diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 474a8c0a0b12..b4c4b83a07f6 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -399,6 +399,18 @@ struct sched_ext_entity { */ u64 slice; + /* + * If set, reject future sched_setscheduler(2) calls updating the policy + * to %SCHED_EXT with -%EACCES. + * + * If set from ops.prep_enable() and the task's policy is already + * %SCHED_EXT, which can happen while the BPF scheduler is being loaded + * or by inhering the parent's policy during fork, the task's policy is + * rejected and forcefully reverted to %SCHED_NORMAL. The number of such + * events are reported through /sys/kernel/debug/sched_ext::nr_rejected. + */ + bool disallow; /* reject switching into SCX */ + /* cold fields */ struct list_head tasks_node; }; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3f177c161c1e..9e566e72c3f2 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7598,6 +7598,10 @@ static int __sched_setscheduler(struct task_struct *p, goto unlock; } + retval = scx_check_setscheduler(p, policy); + if (retval) + goto unlock; + /* * If not changing anything there's no need to proceed further, * but store a possible modification of reset_on_fork. diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 1af74ea8ed42..b9d55c25cec9 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -1641,6 +1641,8 @@ static int scx_ops_prepare_task(struct task_struct *p, struct task_group *tg) WARN_ON_ONCE(p->scx.flags & SCX_TASK_OPS_PREPPED); + p->scx.disallow = false; + if (SCX_HAS_OP(prep_enable)) { struct scx_enable_args args = { }; @@ -1651,6 +1653,27 @@ static int scx_ops_prepare_task(struct task_struct *p, struct task_group *tg) } } + if (p->scx.disallow) { + struct rq *rq; + struct rq_flags rf; + + rq = task_rq_lock(p, &rf); + + /* + * We're either in fork or load path and @p->policy will be + * applied right after. Reverting @p->policy here and rejecting + * %SCHED_EXT transitions from scx_check_setscheduler() + * guarantees that if ops.prep_enable() sets @p->disallow, @p + * can never be in SCX. 
+ */ + if (p->policy == SCHED_EXT) { + p->policy = SCHED_NORMAL; + atomic64_inc(&scx_nr_rejected); + } + + task_rq_unlock(rq, p, &rf); + } + p->scx.flags |= (SCX_TASK_OPS_PREPPED | SCX_TASK_WATCHDOG_RESET); return 0; } @@ -1796,6 +1819,18 @@ static void switching_to_scx(struct rq *rq, struct task_struct *p) static void check_preempt_curr_scx(struct rq *rq, struct task_struct *p,int wake_flags) {} static void switched_to_scx(struct rq *rq, struct task_struct *p) {} +int scx_check_setscheduler(struct task_struct *p, int policy) +{ + lockdep_assert_rq_held(task_rq(p)); + + /* if disallow, reject transitioning into SCX */ + if (scx_enabled() && READ_ONCE(p->scx.disallow) && + p->policy != policy && policy == SCHED_EXT) + return -EACCES; + + return 0; +} + /* * Omitted operations: * @@ -2479,6 +2514,9 @@ static int bpf_scx_btf_struct_access(struct bpf_verifier_log *log, if (off >= offsetof(struct task_struct, scx.slice) && off + size <= offsetofend(struct task_struct, scx.slice)) return SCALAR_VALUE; + if (off >= offsetof(struct task_struct, scx.disallow) && + off + size <= offsetofend(struct task_struct, scx.disallow)) + return SCALAR_VALUE; } if (atype == BPF_READ) diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index 7dfa7b888487..76c94babd19e 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -67,6 +67,7 @@ void scx_pre_fork(struct task_struct *p); int scx_fork(struct task_struct *p); void scx_post_fork(struct task_struct *p); void scx_cancel_fork(struct task_struct *p); +int scx_check_setscheduler(struct task_struct *p, int policy); void init_sched_ext_class(void); __printf(2, 3) void scx_ops_error_type(enum scx_exit_type type, @@ -121,6 +122,8 @@ static inline void scx_pre_fork(struct task_struct *p) {} static inline int scx_fork(struct task_struct *p) { return 0; } static inline void scx_post_fork(struct task_struct *p) {} static inline void scx_cancel_fork(struct task_struct *p) {} +static inline int scx_check_setscheduler(struct task_struct *p, + int policy) { return 0; } static inline void init_sched_ext_class(void) {} static inline void scx_notify_sched_tick(void) {} diff --git a/tools/sched_ext/scx_example_qmap.bpf.c b/tools/sched_ext/scx_example_qmap.bpf.c index b22b8d82846e..46bc16ed301f 100644 --- a/tools/sched_ext/scx_example_qmap.bpf.c +++ b/tools/sched_ext/scx_example_qmap.bpf.c @@ -24,6 +24,7 @@ char _license[] SEC("license") = "GPL"; const volatile u64 slice_ns = SCX_SLICE_DFL; const volatile u32 stall_user_nth; const volatile u32 stall_kernel_nth; +const volatile s32 disallow_tgid; u32 test_error_cnt; @@ -221,6 +222,9 @@ void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev) s32 BPF_STRUCT_OPS(qmap_prep_enable, struct task_struct *p, struct scx_enable_args *args) { + if (p->tgid == disallow_tgid) + p->scx.disallow = true; + /* * @p is new. Let's ensure that its task_ctx is available. We can sleep * in this function and the following will automatically use GFP_KERNEL. 
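The qmap hunk above keys p->scx.disallow off a single tgid passed in from userspace; the same mechanism generalizes to the "kernel threads" example from the changelog. A sketch of a prep_enable() that keeps all kthreads out of SCX, reusing the PF_KTHREAD define already in scx_common.bpf.h (illustrative names, not part of the patch):

	s32 BPF_STRUCT_OPS(demo_prep_enable, struct task_struct *p,
			   struct scx_enable_args *args)
	{
		if (p->flags & PF_KTHREAD)
			p->scx.disallow = true;	/* reject switching this task into SCHED_EXT */
		return 0;
	}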
diff --git a/tools/sched_ext/scx_example_qmap.c b/tools/sched_ext/scx_example_qmap.c index dd490a146b1a..dff9323dfd20 100644 --- a/tools/sched_ext/scx_example_qmap.c +++ b/tools/sched_ext/scx_example_qmap.c @@ -20,12 +20,13 @@ const char help_fmt[] = "\n" "See the top-level comment in .bpf.c for more details.\n" "\n" -"Usage: %s [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT]\n" +"Usage: %s [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT] [-d PID]\n" "\n" " -s SLICE_US Override slice duration\n" " -e COUNT Trigger scx_bpf_error() after COUNT enqueues\n" " -t COUNT Stall every COUNT'th user thread\n" " -T COUNT Stall every COUNT'th kernel thread\n" +" -d PID Disallow a process from switching into SCHED_EXT (-1 for self)\n" " -h Display this help and exit\n"; static volatile int exit_req; @@ -63,6 +64,11 @@ int main(int argc, char **argv) case 'T': skel->rodata->stall_kernel_nth = strtoul(optarg, NULL, 0); break; + case 'd': + skel->rodata->disallow_tgid = strtol(optarg, NULL, 0); + if (skel->rodata->disallow_tgid < 0) + skel->rodata->disallow_tgid = getpid(); + break; default: fprintf(stderr, help_fmt, basename(argv[0])); return opt != 'h'; From patchwork Sat Jan 28 00:16:27 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119577 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id D3CFDC61DB3 for ; Sat, 28 Jan 2023 00:19:27 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S233321AbjA1AT0 (ORCPT ); Fri, 27 Jan 2023 19:19:26 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:37088 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232943AbjA1ASR (ORCPT ); Fri, 27 Jan 2023 19:18:17 -0500 Received: from mail-pl1-x62b.google.com (mail-pl1-x62b.google.com [IPv6:2607:f8b0:4864:20::62b]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id EB7647D6D1; Fri, 27 Jan 2023 16:17:38 -0800 (PST) Received: by mail-pl1-x62b.google.com with SMTP id z13so6570664plg.6; Fri, 27 Jan 2023 16:17:38 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=2762tv9NrSIPMrglXNQsSYNt9iMRZ5q4VM4Sve5Uoww=; b=offoSr/KlMcOHKkZiG+dgkDfXGpqzOLW/eZ8VnoKL0R7GTUED+4NYXCx/Q3ccsISKS 8CQ62owuCFtjZjZibP0FQKUB99osUyH7yWT0Pn4cUcK/vrUKidTPF6zviUVOc7aNwoLz YaOhiIEFV/RB5JCTVaFCNKS2hQLOym3UrxX9hKJV6G1Gey2TsUiothUgtReCJ35Npqse 6+et+ICu7/rm5GwHnzVYtFxgeULWeSNTsDN64LIIkcxZMu9NO1v/iEsXdIQqdws6+1iY qVai11ayvvRUjbujnLs549xfEl9hKNBUPkGAm94jgkt+f5rqjx26jp5Qpvx5wXITc2kI AyPw== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=2762tv9NrSIPMrglXNQsSYNt9iMRZ5q4VM4Sve5Uoww=; b=K1k8+PX+aKC2VaOcxBi/eI10bbm5L/tXqkRLPnli9HjpBJJk/rC8aO9JrJ4gN5NehD YADStv6zzwQ6xSmOe4XseuY6IhjbnEdQZWV2S7stIqh6J6/iHSnm48sbzBirMUaCKL/0 djAnG5KgHTI4pSY2cgK6BxqohrBqzoRTXTzdujmnEkKMXTf92E6+QYUGs2gRgG6OsxwH 7J2EmxjEFT7NJf0R3CFZ1GNi2aWdNgXjvTOCASvJ3YMEqrGf4Q9fsOV/WykvuWqb1odP 
zqRhXzUxTVEbPsLpEi3JQ0XR5nxez/Nbvvnqqo4S3xU7TRbKftPt5Mlt4QyIzUIrEDI2 pRpA== X-Gm-Message-State: AO0yUKW0Crcgs0iMk3bxEoBEbVuxXazSwy4GAs3JJeFGa6EWsMGDDMfp nSofJzDARVkowgyTE/fP6Kw= X-Google-Smtp-Source: AK7set8qrqri+vz0M8S3KUwDIchpGMv7FcB2xyq2cdSXTETVNnCAHh4yWQARFG3OpTIQA9K9/q0zDQ== X-Received: by 2002:a17:903:234b:b0:196:595b:2580 with SMTP id c11-20020a170903234b00b00196595b2580mr4031452plh.0.1674865045351; Fri, 27 Jan 2023 16:17:25 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id a9-20020a170902ecc900b001949ae8c275sm3405975plh.141.2023.01.27.16.17.24 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:24 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 18/30] sched_ext: Allow BPF schedulers to switch all eligible tasks into sched_ext Date: Fri, 27 Jan 2023 14:16:27 -1000 Message-Id: <20230128001639.3510083-19-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org Currently, to use sched_ext, each task has to be put into sched_ext using sched_setscheduler(2). However, some BPF schedulers and use cases might prefer to service all eligible tasks. This patch adds a new kfunc helper, scx_bpf_switch_all(), that BPF schedulers can call from ops.init() to switch all SCHED_NORMAL, SCHED_BATCH and SCHED_IDLE tasks into sched_ext. This has the benefit that the scheduler swaps are transparent to the users and applications. As we know that CFS is not being used when scx_bpf_switch_all() is used, we can also disable hot path entry points with static_branches. Both the dummy and qmap example schedulers are updated with the '-a' option which enables the switch_all behavior. Signed-off-by: Tejun Heo Suggested-by: Barret Rhoden Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- kernel/sched/core.c | 8 +++-- kernel/sched/ext.c | 45 +++++++++++++++++++++++++ kernel/sched/ext.h | 5 +++ tools/sched_ext/scx_common.bpf.h | 1 + tools/sched_ext/scx_example_dummy.bpf.c | 11 ++++++ tools/sched_ext/scx_example_dummy.c | 8 +++-- tools/sched_ext/scx_example_qmap.bpf.c | 9 +++++ tools/sched_ext/scx_example_qmap.c | 7 ++-- 8 files changed, 87 insertions(+), 7 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9e566e72c3f2..5b68b822312b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1204,7 +1204,7 @@ bool sched_can_stop_tick(struct rq *rq) * if there's more than one we need the tick for involuntary * preemption. 
*/ - if (rq->nr_running > 1) + if (!scx_switched_all() && rq->nr_running > 1) return false; return true; @@ -5590,8 +5590,10 @@ void scheduler_tick(void) perf_event_task_tick(); #ifdef CONFIG_SMP - rq->idle_balance = idle_cpu(cpu); - trigger_load_balance(rq); + if (!scx_switched_all()) { + rq->idle_balance = idle_cpu(cpu); + trigger_load_balance(rq); + } #endif } diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index b9d55c25cec9..63f0a3cf2d53 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -73,6 +73,10 @@ static DEFINE_MUTEX(scx_ops_enable_mutex); DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled); DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem); static atomic_t scx_ops_enable_state_var = ATOMIC_INIT(SCX_OPS_DISABLED); +static bool scx_switch_all_req; +static bool scx_switching_all; +DEFINE_STATIC_KEY_FALSE(__scx_switched_all); + static struct sched_ext_ops scx_ops; static bool scx_warned_zero_slice; @@ -1966,6 +1970,8 @@ bool task_on_scx(struct task_struct *p) { if (!scx_enabled() || scx_ops_disabling()) return false; + if (READ_ONCE(scx_switching_all)) + return true; return p->policy == SCHED_EXT; } @@ -2092,6 +2098,9 @@ static void scx_ops_disable_workfn(struct kthread_work *work) */ mutex_lock(&scx_ops_enable_mutex); + static_branch_disable(&__scx_switched_all); + WRITE_ONCE(scx_switching_all, false); + /* avoid racing against fork */ cpus_read_lock(); percpu_down_write(&scx_fork_rwsem); @@ -2276,6 +2285,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops) */ cpus_read_lock(); + scx_switch_all_req = false; if (scx_ops.init) { ret = SCX_CALL_OP_RET(SCX_KF_INIT | SCX_KF_SLEEPABLE, init); if (ret) { @@ -2391,6 +2401,8 @@ static int scx_ops_enable(struct sched_ext_ops *ops) * transitions here are synchronized against sched_ext_free() through * scx_tasks_lock. */ + WRITE_ONCE(scx_switching_all, scx_switch_all_req); + scx_task_iter_init(&sti); while ((p = scx_task_iter_next_filtered_locked(&sti))) { if (READ_ONCE(p->__state) != TASK_DEAD) { @@ -2422,6 +2434,9 @@ static int scx_ops_enable(struct sched_ext_ops *ops) goto err_disable_unlock; } + if (scx_switch_all_req) + static_branch_enable_cpuslocked(&__scx_switched_all); + cpus_read_unlock(); mutex_unlock(&scx_ops_enable_mutex); @@ -2456,6 +2471,9 @@ static int scx_debug_show(struct seq_file *m, void *v) mutex_lock(&scx_ops_enable_mutex); seq_printf(m, "%-30s: %s\n", "ops", scx_ops.name); seq_printf(m, "%-30s: %ld\n", "enabled", scx_enabled()); + seq_printf(m, "%-30s: %d\n", "switching_all", + READ_ONCE(scx_switching_all)); + seq_printf(m, "%-30s: %ld\n", "switched_all", scx_switched_all()); seq_printf(m, "%-30s: %s\n", "enable_state", scx_ops_enable_state_str[scx_ops_enable_state()]); seq_printf(m, "%-30s: %llu\n", "nr_rejected", @@ -2694,6 +2712,31 @@ __diag_push(); __diag_ignore_all("-Wmissing-prototypes", "Global functions as their definitions will be in vmlinux BTF"); +/** + * scx_bpf_switch_all - Switch all tasks into SCX + * @into_scx: switch direction + * + * If @into_scx is %true, all existing and future non-dl/rt tasks are switched + * to SCX. If %false, only tasks which have %SCHED_EXT explicitly set are put on + * SCX. The actual switching is asynchronous. Can be called from ops.init(). 
+ */ +void scx_bpf_switch_all(void) +{ + if (!scx_kf_allowed(SCX_KF_INIT)) + return; + + scx_switch_all_req = true; +} + +BTF_SET8_START(scx_kfunc_ids_init) +BTF_ID_FLAGS(func, scx_bpf_switch_all) +BTF_SET8_END(scx_kfunc_ids_init) + +static const struct btf_kfunc_id_set scx_kfunc_set_init = { + .owner = THIS_MODULE, + .set = &scx_kfunc_ids_init, +}; + /** * scx_bpf_create_dsq - Create a custom DSQ * @dsq_id: DSQ to create @@ -3131,6 +3174,8 @@ static int __init register_ext_kfuncs(void) * check using scx_kf_allowed(). */ if ((ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, + &scx_kfunc_set_init)) || + (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_sleepable)) || (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_enqueue_dispatch)) || diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index 76c94babd19e..a4fe649e649d 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -60,7 +60,9 @@ extern unsigned long scx_watchdog_timeout; extern unsigned long scx_watchdog_timestamp; DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled); +DECLARE_STATIC_KEY_FALSE(__scx_switched_all); #define scx_enabled() static_branch_unlikely(&__scx_ops_enabled) +#define scx_switched_all() static_branch_unlikely(&__scx_switched_all) bool task_on_scx(struct task_struct *p); void scx_pre_fork(struct task_struct *p); @@ -95,6 +97,8 @@ static inline void scx_notify_sched_tick(void) static inline const struct sched_class *next_active_class(const struct sched_class *class) { class++; + if (scx_switched_all() && class == &fair_sched_class) + class++; if (!scx_enabled() && class == &ext_sched_class) class++; return class; @@ -117,6 +121,7 @@ static inline const struct sched_class *next_active_class(const struct sched_cla #else /* CONFIG_SCHED_CLASS_EXT */ #define scx_enabled() false +#define scx_switched_all() false static inline void scx_pre_fork(struct task_struct *p) {} static inline int scx_fork(struct task_struct *p) { return 0; } diff --git a/tools/sched_ext/scx_common.bpf.h b/tools/sched_ext/scx_common.bpf.h index b40a4fc6a159..fec19e8d0681 100644 --- a/tools/sched_ext/scx_common.bpf.h +++ b/tools/sched_ext/scx_common.bpf.h @@ -66,6 +66,7 @@ bool bpf_cpumask_and(struct bpf_cpumask *dst, const struct cpumask *src1, const struct cpumask *src2) __ksym; u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym; +void scx_bpf_switch_all(void) __ksym; s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym; bool scx_bpf_consume(u64 dsq_id) __ksym; u32 scx_bpf_dispatch_nr_slots(void) __ksym; diff --git a/tools/sched_ext/scx_example_dummy.bpf.c b/tools/sched_ext/scx_example_dummy.bpf.c index ac7b490b5a39..28251373d1c3 100644 --- a/tools/sched_ext/scx_example_dummy.bpf.c +++ b/tools/sched_ext/scx_example_dummy.bpf.c @@ -7,6 +7,7 @@ * * - Statistics tracking how many are queued to local and global dsq's. * - Termination notification for userspace. + * - Support for switch_all. * * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
* Copyright (c) 2022 Tejun Heo @@ -16,6 +17,8 @@ char _license[] SEC("license") = "GPL"; +const volatile bool switch_all; + struct user_exit_info uei; struct { @@ -32,6 +35,13 @@ static void stat_inc(u32 idx) (*cnt_p)++; } +s32 BPF_STRUCT_OPS(dummy_init) +{ + if (switch_all) + scx_bpf_switch_all(); + return 0; +} + void BPF_STRUCT_OPS(dummy_enqueue, struct task_struct *p, u64 enq_flags) { if (enq_flags & SCX_ENQ_LOCAL) { @@ -51,6 +61,7 @@ void BPF_STRUCT_OPS(dummy_exit, struct scx_exit_info *ei) SEC(".struct_ops") struct sched_ext_ops dummy_ops = { .enqueue = (void *)dummy_enqueue, + .init = (void *)dummy_init, .exit = (void *)dummy_exit, .name = "dummy", }; diff --git a/tools/sched_ext/scx_example_dummy.c b/tools/sched_ext/scx_example_dummy.c index 72881c881830..9229973e8698 100644 --- a/tools/sched_ext/scx_example_dummy.c +++ b/tools/sched_ext/scx_example_dummy.c @@ -19,8 +19,9 @@ const char help_fmt[] = "\n" "See the top-level comment in .bpf.c for more details.\n" "\n" -"Usage: %s\n" +"Usage: %s [-a]\n" "\n" +" -a Switch all tasks\n" " -h Display this help and exit\n"; static volatile int exit_req; @@ -64,8 +65,11 @@ int main(int argc, char **argv) skel = scx_example_dummy__open(); assert(skel); - while ((opt = getopt(argc, argv, "h")) != -1) { + while ((opt = getopt(argc, argv, "ah")) != -1) { switch (opt) { + case 'a': + skel->rodata->switch_all = true; + break; default: fprintf(stderr, help_fmt, basename(argv[0])); return opt != 'h'; diff --git a/tools/sched_ext/scx_example_qmap.bpf.c b/tools/sched_ext/scx_example_qmap.bpf.c index 46bc16ed301f..ec8c4ee7ef16 100644 --- a/tools/sched_ext/scx_example_qmap.bpf.c +++ b/tools/sched_ext/scx_example_qmap.bpf.c @@ -22,6 +22,7 @@ char _license[] SEC("license") = "GPL"; const volatile u64 slice_ns = SCX_SLICE_DFL; +const volatile bool switch_all; const volatile u32 stall_user_nth; const volatile u32 stall_kernel_nth; const volatile s32 disallow_tgid; @@ -236,6 +237,13 @@ s32 BPF_STRUCT_OPS(qmap_prep_enable, struct task_struct *p, return -ENOMEM; } +s32 BPF_STRUCT_OPS(qmap_init) +{ + if (switch_all) + scx_bpf_switch_all(); + return 0; +} + void BPF_STRUCT_OPS(qmap_exit, struct scx_exit_info *ei) { uei_record(&uei, ei); @@ -248,6 +256,7 @@ struct sched_ext_ops qmap_ops = { .dequeue = (void *)qmap_dequeue, .dispatch = (void *)qmap_dispatch, .prep_enable = (void *)qmap_prep_enable, + .init = (void *)qmap_init, .exit = (void *)qmap_exit, .timeout_ms = 5000U, .name = "qmap", diff --git a/tools/sched_ext/scx_example_qmap.c b/tools/sched_ext/scx_example_qmap.c index dff9323dfd20..30633122e6d5 100644 --- a/tools/sched_ext/scx_example_qmap.c +++ b/tools/sched_ext/scx_example_qmap.c @@ -20,7 +20,7 @@ const char help_fmt[] = "\n" "See the top-level comment in .bpf.c for more details.\n" "\n" -"Usage: %s [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT] [-d PID]\n" +"Usage: %s [-a] [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT] [-d PID]\n" "\n" " -s SLICE_US Override slice duration\n" " -e COUNT Trigger scx_bpf_error() after COUNT enqueues\n" @@ -50,8 +50,11 @@ int main(int argc, char **argv) skel = scx_example_qmap__open(); assert(skel); - while ((opt = getopt(argc, argv, "hs:e:t:T:d:")) != -1) { + while ((opt = getopt(argc, argv, "ahs:e:t:T:d:")) != -1) { switch (opt) { + case 'a': + skel->rodata->switch_all = true; + break; case 's': skel->rodata->slice_ns = strtoull(optarg, NULL, 0) * 1000; break; From patchwork Sat Jan 28 00:16:28 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun 
Heo X-Patchwork-Id: 13119574 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id EE5F9C61DA7 for ; Sat, 28 Jan 2023 00:19:26 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S233303AbjA1ATZ (ORCPT ); Fri, 27 Jan 2023 19:19:25 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:36678 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232950AbjA1ASR (ORCPT ); Fri, 27 Jan 2023 19:18:17 -0500 Received: from mail-pj1-x1036.google.com (mail-pj1-x1036.google.com [IPv6:2607:f8b0:4864:20::1036]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id EEAC988F02; Fri, 27 Jan 2023 16:17:38 -0800 (PST) Received: by mail-pj1-x1036.google.com with SMTP id e8-20020a17090a9a8800b0022c387f0f93so5179003pjp.3; Fri, 27 Jan 2023 16:17:38 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=uAFyqncZwIkK/hOTE3eqi6rO8X/Dhs6kWDTVkwv4vYc=; b=XgD3w2Sm2psqPXjawCd1dkxzuKZSMU20Z6rlAMkvzIgoCSNSx570Z+Kw97bf81Rh3S Wm8z8k6xZTkeiVPWo+meJxHl0vV6UuagaMIRdAE8tZYEt/6O8QoglVmw3aIB2NT2A7zx f9BrzBotKFSJecgJNiQV5UjLsZk2oceDl2aVHnZeyRLgrnXmqcB2Si5xe4f00esK3Vry xqFFDVwL1oZWTkPl0XNGbBP3XrMjiFyWYXMhzb2IgcXU+FrlitpZ2QeDWbYMppL4Bw2Q ZWffMXxS96mq1ZF8CuX1je+8WdpJdLQJTtCzuOWo8vi6zsH/QR0xNED92qJPqGLaoh3z gcuQ== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=uAFyqncZwIkK/hOTE3eqi6rO8X/Dhs6kWDTVkwv4vYc=; b=DVhVn7lkXQTruR0vkZZxBfb/tzXp9Ro2/H7CrNhlIMatVLwQ+0l+EsPT9jasWGL4J2 iqmh3DyOmiwgI0cm2bPXN+WtbBXxjkwzm2hUiiVs78xMLeDSZlnri8QgIGjE1+9ya6Qu 7WFVb9zlLjzCc5u/4eVOopdAIp5J0vyvs63FuSQglxrFdjsKjIGnl4AzUJgiHjI5gHgs NDIsa2BqTU+vl3USDki4zKfrIKrNBFw3JK1euvZxeyS8Ned7QQo/3VYQrdz3Uq5rbcmA UiBQ08Mpslp9jTv62aQrhzp4W6d5nKrAFwfBjRr9juuNXhrB5oDVnk2yxaqO8S9bsMuo vdog== X-Gm-Message-State: AO0yUKWwIM4ZAIDUX7+1sSlbdFEjpn/vbyDf4pPfEJfJFcBOO1YVfnLh fYRQTeDTSNP/hB0sE/De3FM= X-Google-Smtp-Source: AK7set/rAEsKtTk6NDgBfp2ljHUVAGEwO9bDnv+NshJ1mfM9bzPQWOPQgPFKfdL73OQJMSJaSmY/IQ== X-Received: by 2002:a17:90b:4c8d:b0:22c:43:714f with SMTP id my13-20020a17090b4c8d00b0022c0043714fmr13977973pjb.7.1674865047205; Fri, 27 Jan 2023 16:17:27 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. 
[2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id mj8-20020a17090b368800b0021900ba8eeesm5694175pjb.2.2023.01.27.16.17.26 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:26 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo , Julia Lawall Subject: [PATCH 19/30] sched_ext: Implement scx_bpf_kick_cpu() and task preemption support Date: Fri, 27 Jan 2023 14:16:28 -1000 Message-Id: <20230128001639.3510083-20-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org It's often useful to wake up and/or trigger reschedule on other CPUs. This patch adds scx_bpf_kick_cpu() kfunc helper that BPF scheduler can call to kick the target CPU into the scheduling path. As a sched_ext task relinquishes its CPU only after its slice is depleted, this patch also adds SCX_KICK_PREEMPT and SCX_ENQ_PREEMPT which clears the slice of the target CPU's current task to guarantee that sched_ext's scheduling path runs on the CPU. This patch also adds a new example scheduler, scx_example_central, which demonstrates central scheduling where one CPU is responsible for making all scheduling decisions in the system. The central CPU makes scheduling decisions for all CPUs in the system, queues tasks on the appropriate local dsq's and preempts the worker CPUs. The worker CPUs in turn preempt the central CPU when it needs tasks to run. Currently, every CPU depends on its own tick to expire the current task. A follow-up patch implementing tickless support for sched_ext will allow the worker CPUs to go full tickless so that they can run completely undisturbed. v2: * Julia Lawall reported that scx_example_central can overflow the dispatch buffer and malfunction. As scheduling for other CPUs can't be handled by the automatic retry mechanism, fix by implementing an explicit overflow and retry handling. * Updated to use generic BPF cpumask helpers. 
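As a rough sketch (not taken from the patch) of how these primitives could be combined in a BPF scheduler's enqueue path: preempting on wakeups is an arbitrary example policy, but the flags and kfuncs are the ones this patch adds or that scx_common.bpf.h already declares. Whether p->cpus_ptr can be passed directly to scx_bpf_pick_idle_cpu() depends on the verifier's trusted-args handling, so treat this as an illustration rather than verified code.

void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
{
        s32 cpu;

        if (enq_flags & SCX_ENQ_WAKEUP) {
                /*
                 * SCX_ENQ_PREEMPT queues at the head of the local DSQ and
                 * clears the slice of the CPU's current SCX task so that @p
                 * runs as soon as the CPU reenters the scheduling path.
                 */
                scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
                                 enq_flags | SCX_ENQ_PREEMPT);
                return;
        }

        scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);

        /* wake an idle CPU, if any, so the task doesn't sit in the global DSQ */
        cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr);
        if (cpu >= 0)
                scx_bpf_kick_cpu(cpu, 0);
}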
Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden Cc: Julia Lawall --- include/linux/sched/ext.h | 4 + kernel/sched/ext.c | 82 +++++++- kernel/sched/ext.h | 12 ++ kernel/sched/sched.h | 3 + tools/sched_ext/.gitignore | 1 + tools/sched_ext/Makefile | 8 +- tools/sched_ext/scx_common.bpf.h | 1 + tools/sched_ext/scx_example_central.bpf.c | 231 ++++++++++++++++++++++ tools/sched_ext/scx_example_central.c | 93 +++++++++ 9 files changed, 430 insertions(+), 5 deletions(-) create mode 100644 tools/sched_ext/scx_example_central.bpf.c create mode 100644 tools/sched_ext/scx_example_central.c diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index b4c4b83a07f6..10cd3ede5ae5 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -396,6 +396,10 @@ struct sched_ext_entity { * scx_bpf_dispatch() but can also be modified directly by the BPF * scheduler. Automatically decreased by SCX as the task executes. On * depletion, a scheduling event is triggered. + * + * This value is cleared to zero if the task is preempted by + * %SCX_KICK_PREEMPT and shouldn't be used to determine how long the + * task ran. Use p->se.sum_exec_runtime instead. */ u64 slice; diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 63f0a3cf2d53..098edfc56a9b 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -466,7 +466,7 @@ static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p, } } - if (enq_flags & SCX_ENQ_HEAD) + if (enq_flags & (SCX_ENQ_HEAD | SCX_ENQ_PREEMPT)) list_add(&p->scx.dsq_node, &dsq->fifo); else list_add_tail(&p->scx.dsq_node, &dsq->fifo); @@ -482,8 +482,16 @@ static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p, if (is_local) { struct rq *rq = container_of(dsq, struct rq, scx.local_dsq); + bool preempt = false; - if (sched_class_above(&ext_sched_class, rq->curr->sched_class)) + if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr && + rq->curr->sched_class == &ext_sched_class) { + rq->curr->scx.slice = 0; + preempt = true; + } + + if (preempt || sched_class_above(&ext_sched_class, + rq->curr->sched_class)) resched_curr(rq); } else { raw_spin_unlock(&dsq->lock); @@ -1839,7 +1847,9 @@ int scx_check_setscheduler(struct task_struct *p, int policy) * Omitted operations: * * - check_preempt_curr: NOOP as it isn't useful in the wakeup path because the - * task isn't tied to the CPU at that point. + * task isn't tied to the CPU at that point. Preemption is implemented by + * resetting the victim task's slice to 0 and triggering reschedule on the + * target CPU. * * - migrate_task_rq: Unncessary as task to cpu mapping is transient. 
* @@ -2672,6 +2682,32 @@ static const struct sysrq_key_op sysrq_sched_ext_reset_op = { .enable_mask = SYSRQ_ENABLE_RTNICE, }; +static void kick_cpus_irq_workfn(struct irq_work *irq_work) +{ + struct rq *this_rq = this_rq(); + int this_cpu = cpu_of(this_rq); + int cpu; + + for_each_cpu(cpu, this_rq->scx.cpus_to_kick) { + struct rq *rq = cpu_rq(cpu); + unsigned long flags; + + raw_spin_rq_lock_irqsave(rq, flags); + + if (cpu_online(cpu) || cpu == this_cpu) { + if (cpumask_test_cpu(cpu, this_rq->scx.cpus_to_preempt) && + rq->curr->sched_class == &ext_sched_class) + rq->curr->scx.slice = 0; + resched_curr(rq); + } + + raw_spin_rq_unlock_irqrestore(rq, flags); + } + + cpumask_clear(this_rq->scx.cpus_to_kick); + cpumask_clear(this_rq->scx.cpus_to_preempt); +} + void __init init_sched_ext_class(void) { int cpu; @@ -2695,6 +2731,10 @@ void __init init_sched_ext_class(void) init_dsq(&rq->scx.local_dsq, SCX_DSQ_LOCAL); INIT_LIST_HEAD(&rq->scx.watchdog_list); + + BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick, GFP_KERNEL)); + BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_preempt, GFP_KERNEL)); + init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn); } register_sysrq_key('S', &sysrq_sched_ext_reset_op); @@ -2917,6 +2957,41 @@ static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = { .set = &scx_kfunc_ids_dispatch, }; +/** + * scx_bpf_kick_cpu - Trigger reschedule on a CPU + * @cpu: cpu to kick + * @flags: SCX_KICK_* flags + * + * Kick @cpu into rescheduling. This can be used to wake up an idle CPU or + * trigger rescheduling on a busy CPU. This can be called from any online + * scx_ops operation and the actual kicking is performed asynchronously through + * an irq work. + */ +void scx_bpf_kick_cpu(s32 cpu, u64 flags) +{ + struct rq *rq; + + if (!ops_cpu_valid(cpu)) { + scx_ops_error("invalid cpu %d", cpu); + return; + } + + preempt_disable(); + rq = this_rq(); + + /* + * Actual kicking is bounced to kick_cpus_irq_workfn() to avoid nesting + * rq locks. We can probably be smarter and avoid bouncing if called + * from ops which don't hold a rq lock. + */ + cpumask_set_cpu(cpu, rq->scx.cpus_to_kick); + if (flags & SCX_KICK_PREEMPT) + cpumask_set_cpu(cpu, rq->scx.cpus_to_preempt); + + irq_work_queue(&rq->scx.kick_cpus_irq_work); + preempt_enable(); +} + /** * scx_bpf_dsq_nr_queued - Return the number of queued tasks * @dsq_id: id of the DSQ @@ -3138,6 +3213,7 @@ s32 scx_bpf_task_cpu(const struct task_struct *p) } BTF_SET8_START(scx_kfunc_ids_any) +BTF_ID_FLAGS(func, scx_bpf_kick_cpu) BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued) BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle) BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_TRUSTED_ARGS) diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index a4fe649e649d..0b04626e8ca2 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -19,6 +19,14 @@ enum scx_enq_flags { /* high 32bits are SCX specific */ + /* + * Set the following to trigger preemption when calling + * scx_bpf_dispatch() with a local dsq as the target. The slice of the + * current task is cleared to zero and the CPU is kicked into the + * scheduling path. Implies %SCX_ENQ_HEAD. + */ + SCX_ENQ_PREEMPT = 1LLU << 32, + /* * The task being enqueued is the only task available for the cpu. 
By * default, ext core keeps executing such tasks but when @@ -51,6 +59,10 @@ enum scx_deq_flags { SCX_DEQ_SLEEP = DEQUEUE_SLEEP, }; +enum scx_kick_flags { + SCX_KICK_PREEMPT = 1LLU << 0, /* force scheduling on the CPU */ +}; + #ifdef CONFIG_SCHED_CLASS_EXT extern const struct sched_class ext_sched_class; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 112c2f127c95..3c16caecd3a5 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -690,6 +690,9 @@ struct scx_rq { struct list_head watchdog_list; u64 ops_qseq; u32 nr_running; + cpumask_var_t cpus_to_kick; + cpumask_var_t cpus_to_preempt; + struct irq_work kick_cpus_irq_work; }; #endif /* CONFIG_SCHED_CLASS_EXT */ diff --git a/tools/sched_ext/.gitignore b/tools/sched_ext/.gitignore index 6734f7fd9324..389f0e5b0970 100644 --- a/tools/sched_ext/.gitignore +++ b/tools/sched_ext/.gitignore @@ -1,5 +1,6 @@ scx_example_dummy scx_example_qmap +scx_example_central *.skel.h *.subskel.h /tools/ diff --git a/tools/sched_ext/Makefile b/tools/sched_ext/Makefile index 926b0a36c221..c6c3669c47b9 100644 --- a/tools/sched_ext/Makefile +++ b/tools/sched_ext/Makefile @@ -115,7 +115,7 @@ BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) \ -Wno-compare-distinct-pointer-types \ -O2 -mcpu=v3 -all: scx_example_dummy scx_example_qmap +all: scx_example_dummy scx_example_qmap scx_example_central # sort removes libbpf duplicates when not cross-building MAKE_DIRS := $(sort $(BUILD_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \ @@ -174,10 +174,14 @@ scx_example_qmap: scx_example_qmap.c scx_example_qmap.skel.h user_exit_info.h $(CC) $(CFLAGS) -c $< -o $@.o $(CC) -o $@ $@.o $(HOST_BPFOBJ) $(LDFLAGS) +scx_example_central: scx_example_central.c scx_example_central.skel.h user_exit_info.h + $(CC) $(CFLAGS) -c $< -o $@.o + $(CC) -o $@ $@.o $(HOST_BPFOBJ) $(LDFLAGS) + clean: rm -rf $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) rm -f *.o *.bpf.o *.skel.h *.subskel.h - rm -f scx_example_dummy scx_example_qmap + rm -f scx_example_dummy scx_example_qmap scx_example_central .PHONY: all clean diff --git a/tools/sched_ext/scx_common.bpf.h b/tools/sched_ext/scx_common.bpf.h index fec19e8d0681..ff32e4dd30a6 100644 --- a/tools/sched_ext/scx_common.bpf.h +++ b/tools/sched_ext/scx_common.bpf.h @@ -71,6 +71,7 @@ s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym; bool scx_bpf_consume(u64 dsq_id) __ksym; u32 scx_bpf_dispatch_nr_slots(void) __ksym; void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym; +void scx_bpf_kick_cpu(s32 cpu, u64 flags) __ksym; s32 scx_bpf_dsq_nr_queued(u64 dsq_id) __ksym; bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) __ksym; s32 scx_bpf_pick_idle_cpu(const cpumask_t *cpus_allowed) __ksym; diff --git a/tools/sched_ext/scx_example_central.bpf.c b/tools/sched_ext/scx_example_central.bpf.c new file mode 100644 index 000000000000..6b246bf308d0 --- /dev/null +++ b/tools/sched_ext/scx_example_central.bpf.c @@ -0,0 +1,231 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * A central FIFO sched_ext scheduler which demonstrates the followings: + * + * a. Making all scheduling decisions from one CPU: + * + * The central CPU is the only one making scheduling decisions. All other + * CPUs kick the central CPU when they run out of tasks to run. + * + * There is one global BPF queue and the central CPU schedules all CPUs by + * dispatching from the global queue to each CPU's local dsq from dispatch(). + * This isn't the most straightforward. e.g. It'd be easier to bounce + * through per-CPU BPF queues. 
The current design is chosen to maximally + * utilize and verify various scx mechanisms such as LOCAL_ON dispatching. + * + * b. Preemption + * + * SCX_KICK_PREEMPT is used to trigger scheduling and CPUs to move to the + * next tasks. + * + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#include "scx_common.bpf.h" + +char _license[] SEC("license") = "GPL"; + +enum { + FALLBACK_DSQ_ID = 0, + MAX_CPUS = 4096, + MS_TO_NS = 1000LLU * 1000, + TIMER_INTERVAL_NS = 1 * MS_TO_NS, +}; + +const volatile bool switch_all; +const volatile s32 central_cpu; +const volatile u32 nr_cpu_ids; + +u64 nr_total, nr_locals, nr_queued, nr_lost_pids; +u64 nr_dispatches, nr_mismatches, nr_retries; +u64 nr_overflows; + +struct user_exit_info uei; + +struct { + __uint(type, BPF_MAP_TYPE_QUEUE); + __uint(max_entries, 4096); + __type(value, s32); +} central_q SEC(".maps"); + +/* can't use percpu map due to bad lookups */ +static bool cpu_gimme_task[MAX_CPUS]; + +struct central_timer { + struct bpf_timer timer; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, u32); + __type(value, struct central_timer); +} central_timer SEC(".maps"); + +static bool vtime_before(u64 a, u64 b) +{ + return (s64)(a - b) < 0; +} + +s32 BPF_STRUCT_OPS(central_select_cpu, struct task_struct *p, + s32 prev_cpu, u64 wake_flags) +{ + /* + * Steer wakeups to the central CPU as much as possible to avoid + * disturbing other CPUs. It's safe to blindly return the central cpu as + * select_cpu() is a hint and if @p can't be on it, the kernel will + * automatically pick a fallback CPU. + */ + return central_cpu; +} + +void BPF_STRUCT_OPS(central_enqueue, struct task_struct *p, u64 enq_flags) +{ + s32 pid = p->pid; + + __sync_fetch_and_add(&nr_total, 1); + + if (bpf_map_push_elem(¢ral_q, &pid, 0)) { + __sync_fetch_and_add(&nr_overflows, 1); + scx_bpf_dispatch(p, FALLBACK_DSQ_ID, SCX_SLICE_DFL, enq_flags); + return; + } + + __sync_fetch_and_add(&nr_queued, 1); + + if (!scx_bpf_task_running(p)) + scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT); +} + +static int dispatch_a_task_loopfn(u32 idx, void *data) +{ + s32 cpu = *(s32 *)data; + s32 pid; + struct task_struct *p; + bool *gimme; + + if (bpf_map_pop_elem(¢ral_q, &pid)) + return 1; + + __sync_fetch_and_sub(&nr_queued, 1); + + p = bpf_task_from_pid(pid); + if (!p) { + __sync_fetch_and_add(&nr_lost_pids, 1); + return 0; + } + + /* + * If we can't run the task at the top, do the dumb thing and bounce it + * to the fallback dsq. 
+ */ + if (!bpf_cpumask_test_cpu(cpu, p->cpus_ptr)) { + __sync_fetch_and_add(&nr_mismatches, 1); + scx_bpf_dispatch(p, FALLBACK_DSQ_ID, SCX_SLICE_DFL, 0); + bpf_task_release(p); + return 0; + } + + /* dispatch to the local and mark that @cpu doesn't need more tasks */ + scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_DFL, 0); + + if (cpu != central_cpu) + scx_bpf_kick_cpu(cpu, 0); + + gimme = MEMBER_VPTR(cpu_gimme_task, [cpu]); + if (gimme) + *gimme = false; + + bpf_task_release(p); + return 1; +} + +static int dispatch_to_one_cpu_loopfn(u32 idx, void *data) +{ + s32 cpu = idx; + + if (!scx_bpf_dispatch_nr_slots()) + return 1; + + if (cpu >= 0 && cpu < MAX_CPUS) { + bool *gimme = MEMBER_VPTR(cpu_gimme_task, [cpu]); + if (gimme && !*gimme) + return 0; + } + + bpf_loop(1 << 23, dispatch_a_task_loopfn, &cpu, 0); + return 0; +} + +void BPF_STRUCT_OPS(central_dispatch, s32 cpu, struct task_struct *prev) +{ + if (cpu == central_cpu) { + /* dispatch for all other CPUs first */ + __sync_fetch_and_add(&nr_dispatches, 1); + bpf_loop(nr_cpu_ids, dispatch_to_one_cpu_loopfn, NULL, 0); + + /* + * Retry if we ran out of dispatch buffer slots as we might have + * skipped some CPUs and also need to dispatch for self. The ext + * core automatically retries if the local dsq is empty but we + * can't rely on that as we're dispatching for other CPUs too. + * Kick self explicitly to retry. + */ + if (!scx_bpf_dispatch_nr_slots()) { + __sync_fetch_and_add(&nr_retries, 1); + scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT); + return; + } + + /* look for a task to run on the central CPU */ + if (scx_bpf_consume(FALLBACK_DSQ_ID)) + return; + bpf_loop(1 << 23, dispatch_a_task_loopfn, &cpu, 0); + } else { + bool *gimme; + + if (scx_bpf_consume(FALLBACK_DSQ_ID)) + return; + + gimme = MEMBER_VPTR(cpu_gimme_task, [cpu]); + if (gimme) + *gimme = true; + + /* + * Force dispatch on the scheduling CPU so that it finds a task + * to run for us. + */ + scx_bpf_kick_cpu(central_cpu, SCX_KICK_PREEMPT); + } +} + +int BPF_STRUCT_OPS_SLEEPABLE(central_init) +{ + if (switch_all) + scx_bpf_switch_all(); + + return scx_bpf_create_dsq(FALLBACK_DSQ_ID, -1); +} + +void BPF_STRUCT_OPS(central_exit, struct scx_exit_info *ei) +{ + uei_record(&uei, ei); +} + +SEC(".struct_ops") +struct sched_ext_ops central_ops = { + /* + * We are offloading all scheduling decisions to the central CPU and + * thus being the last task on a given CPU doesn't mean anything + * special. Enqueue the last tasks like any other tasks. + */ + .flags = SCX_OPS_ENQ_LAST, + + .select_cpu = (void *)central_select_cpu, + .enqueue = (void *)central_enqueue, + .dispatch = (void *)central_dispatch, + .init = (void *)central_init, + .exit = (void *)central_exit, + .name = "central", +}; diff --git a/tools/sched_ext/scx_example_central.c b/tools/sched_ext/scx_example_central.c new file mode 100644 index 000000000000..14f6598c03df --- /dev/null +++ b/tools/sched_ext/scx_example_central.c @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
+ * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include "user_exit_info.h" +#include "scx_example_central.skel.h" + +const char help_fmt[] = +"A central FIFO sched_ext scheduler.\n" +"\n" +"See the top-level comment in .bpf.c for more details.\n" +"\n" +"Usage: %s [-a] [-c CPU]\n" +"\n" +" -a Switch all tasks\n" +" -c CPU Override the central CPU (default: 0)\n" +" -h Display this help and exit\n"; + +static volatile int exit_req; + +static void sigint_handler(int dummy) +{ + exit_req = 1; +} + +int main(int argc, char **argv) +{ + struct scx_example_central *skel; + struct bpf_link *link; + u64 seq = 0; + s32 opt; + + signal(SIGINT, sigint_handler); + signal(SIGTERM, sigint_handler); + + libbpf_set_strict_mode(LIBBPF_STRICT_ALL); + + skel = scx_example_central__open(); + assert(skel); + + skel->rodata->central_cpu = 0; + skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus(); + + while ((opt = getopt(argc, argv, "ahc:")) != -1) { + switch (opt) { + case 'a': + skel->rodata->switch_all = true; + break; + case 'c': + skel->rodata->central_cpu = strtoul(optarg, NULL, 0); + break; + default: + fprintf(stderr, help_fmt, basename(argv[0])); + return opt != 'h'; + } + } + + assert(!scx_example_central__load(skel)); + + link = bpf_map__attach_struct_ops(skel->maps.central_ops); + assert(link); + + while (!exit_req && !uei_exited(&skel->bss->uei)) { + printf("[SEQ %lu]\n", seq++); + printf("total :%10lu local:%10lu queued:%10lu lost:%10lu\n", + skel->bss->nr_total, + skel->bss->nr_locals, + skel->bss->nr_queued, + skel->bss->nr_lost_pids); + printf(" dispatch:%10lu mismatch:%10lu retry:%10lu\n", + skel->bss->nr_dispatches, + skel->bss->nr_mismatches, + skel->bss->nr_retries); + printf("overflow:%10lu\n", + skel->bss->nr_overflows); + fflush(stdout); + sleep(1); + } + + bpf_link__destroy(link); + uei_print(&skel->bss->uei); + scx_example_central__destroy(skel); + return 0; +} From patchwork Sat Jan 28 00:16:29 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119573 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 2EE3FC38142 for ; Sat, 28 Jan 2023 00:19:16 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S230039AbjA1ATO (ORCPT ); Fri, 27 Jan 2023 19:19:14 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:36850 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232296AbjA1ASQ (ORCPT ); Fri, 27 Jan 2023 19:18:16 -0500 Received: from mail-pj1-x1033.google.com (mail-pj1-x1033.google.com [IPv6:2607:f8b0:4864:20::1033]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 5AABD8B7BB; Fri, 27 Jan 2023 16:17:39 -0800 (PST) Received: by mail-pj1-x1033.google.com with SMTP id b10so6154269pjo.1; Fri, 27 Jan 2023 16:17:39 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=KmNUXzdAjn+8g9pw3qJZQhYrmtjI+f2F3U/9NYvg2Vw=; b=o5+WMPXVXRBYK7dha0ku8Xa2C+kWCx+8oqvc0ixqooUm7Z2wyzoA8S/GNck3NAWdQL 
C35MhaaafwtZxDqbH0+te9gT7FKF29NTTczTCt1azcSL7ebadbcJJ5JHyvuHjR7uTylF jPyVCpDpUT7SPbM/jkLKpFUjnrceBsnr0XC9Q4XgCV8pHwl+MK+tJ0VajgnE0D7wAt+C 36E7iDeuQg+ojmPTXKwTgMpEGEkC4bVlLyXR+IFdFN37tJHGjaD4kWDYnJUH6ynL3NWP hfsL0dQM3ptiR6hMyefsUqNCGsjnza494/netLp5Z6p0820/CxtnF3V+cooOE+P41VI+ nOLA== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=KmNUXzdAjn+8g9pw3qJZQhYrmtjI+f2F3U/9NYvg2Vw=; b=p/BLZJQQmFDY9YwxxdywETDJqV6wFiHZJ0S/trLpCKWrKZy2JT8+HAEjPmLAev+Xt0 ql/RNM9AeL7TOvj91sh96+Xq/CdQ5/gPMWFaqZe0B/ZchE1Ol1wIHZOTh1hv4P76G+CD PjZMruTFKGjy0A1TFsBhd62zWLAejQCjz4qia1ytfUmpcnBFRQtPo/7rDrZKDh9kiH9S foPYXakHNE720S4//toPBNCxPpfEdy5+wetw6XT4iOWIb0NyMahvGI694MHC9SA/WbZm kAA6a3oNvgbt1oUmjmyb5CxFEJYJ+2bhYDpRjfPf690wNXojSwvcAs/ZDSroxwauZP7b gK7g== X-Gm-Message-State: AFqh2krfnV8J5cKOOuGkP2FXup4j6+rZFUyJfrQ14ilGIr/vSr+kIVZ6 9Q8dPPjTAE5fkYA1+bELQ5Y= X-Google-Smtp-Source: AMrXdXtpa1bh/evWVHS3slEWwNN6R3g9OijVJpkUYAJO20YYK02UcmY3gwDWxa22EHeRt2A811GQJw== X-Received: by 2002:a05:6a20:6a92:b0:b8:7ef5:4308 with SMTP id bi18-20020a056a206a9200b000b87ef54308mr39732936pzb.23.1674865048967; Fri, 27 Jan 2023 16:17:28 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id p6-20020a056a000a0600b00581013fcbe1sm3149608pfh.159.2023.01.27.16.17.28 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:28 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 20/30] sched_ext: Make watchdog handle ops.dispatch() looping stall Date: Fri, 27 Jan 2023 14:16:29 -1000 Message-Id: <20230128001639.3510083-21-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org The dispatch path retries if the local DSQ is still empty after ops.dispatch() either dispatched or consumed a task. This is both out of necessity and for convenience. It has to retry because the dispatch path might lose the tasks to dequeue while the rq lock is released while trying to migrate tasks across CPUs, and the retry mechanism makes ops.dispatch() implementation easier as it only needs to make some forward progress each iteration. However, this makes it possible for ops.dispatch() to stall CPUs by repeatedly dispatching ineligible tasks. If all CPUs are stalled that way, the watchdog or sysrq handler can't run and the system can't be saved. Let's address the issue by breaking out of the dispatch loop after 32 iterations. It is unlikely but not impossible for ops.dispatch() to legitimately go over the iteration limit. 
We want to come back to the dispatch path in such cases as not doing so risks stalling the CPU by idling with runnable tasks pending. As the previous task is still current in balance_scx(), resched_curr() doesn't do anything - it will just get cleared. Let's instead use scx_kick_bpf() which will trigger reschedule after switching to the next task which will likely be the idle task. Signed-off-by: Tejun Heo Reviewed-by: David Vernet --- kernel/sched/ext.c | 17 +++++++++++++++++ tools/sched_ext/scx_example_qmap.bpf.c | 17 +++++++++++++++++ tools/sched_ext/scx_example_qmap.c | 8 ++++++-- 3 files changed, 40 insertions(+), 2 deletions(-) diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 098edfc56a9b..9bc625676bbc 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -9,6 +9,7 @@ enum scx_internal_consts { SCX_NR_ONLINE_OPS = SCX_OP_IDX(init), SCX_DSP_DFL_MAX_BATCH = 32, + SCX_DSP_MAX_LOOPS = 32, SCX_WATCHDOG_MAX_TIMEOUT = 30 * HZ, }; @@ -168,6 +169,7 @@ static DEFINE_PER_CPU(struct scx_dsp_ctx, scx_dsp_ctx); void scx_bpf_dispatch(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags); +void scx_bpf_kick_cpu(s32 cpu, u64 flags); struct scx_task_iter { struct sched_ext_entity cursor; @@ -1243,6 +1245,7 @@ static int balance_scx(struct rq *rq, struct task_struct *prev, struct scx_rq *scx_rq = &rq->scx; struct scx_dsp_ctx *dspc = this_cpu_ptr(&scx_dsp_ctx); bool prev_on_scx = prev->sched_class == &ext_sched_class; + int nr_loops = SCX_DSP_MAX_LOOPS; lockdep_assert_rq_held(rq); @@ -1297,6 +1300,20 @@ static int balance_scx(struct rq *rq, struct task_struct *prev, return 1; if (consume_dispatch_q(rq, rf, &scx_dsq_global)) return 1; + + /* + * ops.dispatch() can trap us in this loop by repeatedly + * dispatching ineligible tasks. Break out once in a while to + * allow the watchdog to run. As IRQ can't be enabled in + * balance(), we want to complete this scheduling cycle and then + * start a new one. IOW, we want to call resched_curr() on the + * next, most likely idle, task, not the current one. Use + * scx_bpf_kick_cpu() for deferred kicking. + */ + if (unlikely(!--nr_loops)) { + scx_bpf_kick_cpu(cpu_of(rq), 0); + break; + } } while (dspc->nr_tasks); return 0; diff --git a/tools/sched_ext/scx_example_qmap.bpf.c b/tools/sched_ext/scx_example_qmap.bpf.c index ec8c4ee7ef16..e968a9b341a4 100644 --- a/tools/sched_ext/scx_example_qmap.bpf.c +++ b/tools/sched_ext/scx_example_qmap.bpf.c @@ -25,6 +25,7 @@ const volatile u64 slice_ns = SCX_SLICE_DFL; const volatile bool switch_all; const volatile u32 stall_user_nth; const volatile u32 stall_kernel_nth; +const volatile u32 dsp_inf_loop_after; const volatile s32 disallow_tgid; u32 test_error_cnt; @@ -184,6 +185,22 @@ void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev) s32 pid; int i; + if (dsp_inf_loop_after && nr_dispatched > dsp_inf_loop_after) { + struct task_struct *p; + + /* + * PID 2 should be kthreadd which should mostly be idle and off + * the scheduler. Let's keep dispatching it to force the kernel + * to call this function over and over again. 
+ */ + p = bpf_task_from_pid(2); + if (p) { + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, slice_ns, 0); + bpf_task_release(p); + return; + } + } + if (!idx || !cnt) { scx_bpf_error("failed to lookup idx[%p], cnt[%p]", idx, cnt); return; diff --git a/tools/sched_ext/scx_example_qmap.c b/tools/sched_ext/scx_example_qmap.c index 30633122e6d5..820fe50bf43c 100644 --- a/tools/sched_ext/scx_example_qmap.c +++ b/tools/sched_ext/scx_example_qmap.c @@ -20,12 +20,13 @@ const char help_fmt[] = "\n" "See the top-level comment in .bpf.c for more details.\n" "\n" -"Usage: %s [-a] [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT] [-d PID]\n" +"Usage: %s [-a] [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT] [-l COUNT] [-d PID]\n" "\n" " -s SLICE_US Override slice duration\n" " -e COUNT Trigger scx_bpf_error() after COUNT enqueues\n" " -t COUNT Stall every COUNT'th user thread\n" " -T COUNT Stall every COUNT'th kernel thread\n" +" -l COUNT Trigger dispatch infinite looping after COUNT dispatches\n" " -d PID Disallow a process from switching into SCHED_EXT (-1 for self)\n" " -h Display this help and exit\n"; @@ -50,7 +51,7 @@ int main(int argc, char **argv) skel = scx_example_qmap__open(); assert(skel); - while ((opt = getopt(argc, argv, "ahs:e:t:T:d:")) != -1) { + while ((opt = getopt(argc, argv, "ahs:e:t:T:l:d:")) != -1) { switch (opt) { case 'a': skel->rodata->switch_all = true; @@ -67,6 +68,9 @@ int main(int argc, char **argv) case 'T': skel->rodata->stall_kernel_nth = strtoul(optarg, NULL, 0); break; + case 'l': + skel->rodata->dsp_inf_loop_after = strtoul(optarg, NULL, 0); + break; case 'd': skel->rodata->disallow_tgid = strtol(optarg, NULL, 0); if (skel->rodata->disallow_tgid < 0) From patchwork Sat Jan 28 00:16:30 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119576 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 8F24BC54EAA for ; Sat, 28 Jan 2023 00:19:28 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S233332AbjA1AT1 (ORCPT ); Fri, 27 Jan 2023 19:19:27 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:37276 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S233119AbjA1ASR (ORCPT ); Fri, 27 Jan 2023 19:18:17 -0500 Received: from mail-pf1-x42c.google.com (mail-pf1-x42c.google.com [IPv6:2607:f8b0:4864:20::42c]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 25CAD8B7AF; Fri, 27 Jan 2023 16:17:39 -0800 (PST) Received: by mail-pf1-x42c.google.com with SMTP id z1so1364338pfg.12; Fri, 27 Jan 2023 16:17:39 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=EL6l4GyppJeuoMhOYlGrh8XYIK9k9J2icjfC6nfKvKE=; b=KgRwsDKwpRyUF6WZ0KfgZvKuQmPE4gaAxPRADwRHqeAenv4OwimnsvGqOepdYFmoiN Fw0pDTU4p9hpgJ70xaFsYTZmsdxQ4KfwYo15tthjll4gMIESRJcv/pZ0I9OpYHSfmrX9 2w/yDuAVipvPXuZyxBZuERVuc9ANfapklzVoakNIG/WrwaQkTV1hInMH8hXOWw55CSzj 5dQ5QJOe30yQkyrnmGsP2b1zX+y0zUg5nVOztpVVvzMmTZ9/b7RRLFQEX7c7z/kEMRZl XvxPOMiDaXowOLm0CmWdtYgmb5ehVGYRsKfIiHlNzEaa0fMUrTJFNK/50NSij4kFDv9z aGEg== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; 
h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=EL6l4GyppJeuoMhOYlGrh8XYIK9k9J2icjfC6nfKvKE=; b=1zgnguE1jBT9YxdduNCGmZWeDlrGVZ+alo1seGUumfkIWB+BctqTA+ezUFviVTKNLa 4jz6CCYahUkDcUFMsLUhKiBskdIhnxHwz7k4Jdig+fjrosaYvC44cpVN85HeunALLcRw ReUkq3/SVtSMPSv6YP617ZnFV4TiVJX+St9xdCzghl9iYtaGB0RUesz7n/cLNVEWJU1o I+m7sMWOd5ynHHU+H8nCHIRQ2HDpgp+Tg74EyIDr9/u7AVC8Ogr3v4+v0pNY0QWc3J1a bhrdO+v6A3Fn3bsGMJAnm1sy3xRbCTOhSM3qurTP/dwJJDMPhW0y1T0sVqm7mhoI+yDt mlrA== X-Gm-Message-State: AO0yUKU8IV+Bng+gKaSTg80fKyKZ0RlI6T5ZsbWVfdJZPa7+U7018Xoc PJ8j6gWQx8gXpNztMIO+6z8= X-Google-Smtp-Source: AK7set9ASOUgqlO0AjamBITpY29W5rUev3hU4n74R22c344yebowhTIX1jJWxBiLBBZ+VY5i/m3log== X-Received: by 2002:a05:6a00:1243:b0:58b:b29c:50c2 with SMTP id u3-20020a056a00124300b0058bb29c50c2mr254140pfi.26.1674865050717; Fri, 27 Jan 2023 16:17:30 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id w188-20020a6262c5000000b0059394f7a583sm63954pfb.185.2023.01.27.16.17.30 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:30 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 21/30] sched_ext: Add task state tracking operations Date: Fri, 27 Jan 2023 14:16:30 -1000 Message-Id: <20230128001639.3510083-22-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org Being able to track the task runnable and running state transitions are useful for a variety of purposes including latency tracking and load factor calculation. Currently, BPF schedulers don't have a good way of tracking these transitions. Becoming runnable can be determined from ops.enqueue() but becoming quiescent can only be inferred from the lack of subsequent enqueue. Also, as the local dsq can have multiple tasks and some events are handled in the sched_ext core, it's difficult to determine when a given task starts and stops executing. This patch adds sched_ext_ops.runnable(), .running(), .stopping() and .quiescent() operations to track the task runnable and running state transitions. They're mostly self explanatory; however, we want to ensure that running <-> stopping transitions are always contained within runnable <-> quiescent transitions which is a bit different from how the scheduler core behaves. This adds a bit of complication. See the comment in dequeue_task_scx(). 
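As an illustration of the latency tracking the commit message mentions (a sketch only: the map and field names are invented, and the ->stopping()/->quiescent() callbacks are omitted for brevity), a scheduler could timestamp tasks in ->runnable() and accumulate the wait in ->running() using task local storage, much as the qmap example already does for its task context:

struct task_stats {
        u64 runnable_at;
        u64 total_wait_ns;
};

struct {
        __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, int);
        __type(value, struct task_stats);
} task_stats_map SEC(".maps");

void BPF_STRUCT_OPS(example_runnable, struct task_struct *p, u64 enq_flags)
{
        struct task_stats *ts;

        ts = bpf_task_storage_get(&task_stats_map, p, 0,
                                  BPF_LOCAL_STORAGE_GET_F_CREATE);
        if (ts)
                ts->runnable_at = bpf_ktime_get_ns();
}

void BPF_STRUCT_OPS(example_running, struct task_struct *p)
{
        struct task_stats *ts;

        ts = bpf_task_storage_get(&task_stats_map, p, 0, 0);
        if (ts && ts->runnable_at) {
                /* only charge the wait up to the first ->running() */
                ts->total_wait_ns += bpf_ktime_get_ns() - ts->runnable_at;
                ts->runnable_at = 0;
        }
}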
Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- include/linux/sched/ext.h | 65 +++++++++++++++++++++++++++++++++++++++ kernel/sched/ext.c | 31 +++++++++++++++++++ 2 files changed, 96 insertions(+) diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 10cd3ede5ae5..338b41cd79fa 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -193,6 +193,71 @@ struct sched_ext_ops { */ void (*dispatch)(s32 cpu, struct task_struct *prev); + /** + * runnable - A task is becoming runnable on its associated CPU + * @p: task becoming runnable + * @enq_flags: %SCX_ENQ_* + * + * This and the following three functions can be used to track a task's + * execution state transitions. A task becomes ->runnable() on a CPU, + * and then goes through one or more ->running() and ->stopping() pairs + * as it runs on the CPU, and eventually becomes ->quiescent() when it's + * done running on the CPU. + * + * @p is becoming runnable on the CPU because it's + * + * - waking up (%SCX_ENQ_WAKEUP) + * - being moved from another CPU + * - being restored after temporarily taken off the queue for an + * attribute change. + * + * This and ->enqueue() are related but not coupled. This operation + * notifies @p's state transition and may not be followed by ->enqueue() + * e.g. when @p is being dispatched to a remote CPU. Likewise, a task + * may be ->enqueue()'d without being preceded by this operation e.g. + * after exhausting its slice. + */ + void (*runnable)(struct task_struct *p, u64 enq_flags); + + /** + * running - A task is starting to run on its associated CPU + * @p: task starting to run + * + * See ->runnable() for explanation on the task state notifiers. + */ + void (*running)(struct task_struct *p); + + /** + * stopping - A task is stopping execution + * @p: task stopping to run + * @runnable: is task @p still runnable? + * + * See ->runnable() for explanation on the task state notifiers. If + * !@runnable, ->quiescent() will be invoked after this operation + * returns. + */ + void (*stopping)(struct task_struct *p, bool runnable); + + /** + * quiescent - A task is becoming not runnable on its associated CPU + * @p: task becoming not runnable + * @deq_flags: %SCX_DEQ_* + * + * See ->runnable() for explanation on the task state notifiers. + * + * @p is becoming quiescent on the CPU because it's + * + * - sleeping (%SCX_DEQ_SLEEP) + * - being moved to another CPU + * - being temporarily taken off the queue for an attribute change + * (%SCX_DEQ_SAVE) + * + * This and ->dequeue() are related but not coupled. This operation + * notifies @p's state transition and may not be preceded by ->dequeue() + * e.g. when @p is being dispatched to a remote CPU. + */ + void (*quiescent)(struct task_struct *p, u64 deq_flags); + /** * yield - Yield CPU * @from: yielding task diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 9bc625676bbc..4acaf39ea879 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -743,6 +743,9 @@ static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags rq->scx.nr_running++; add_nr_running(rq, 1); + if (SCX_HAS_OP(runnable)) + scx_ops.runnable(p, enq_flags); + do_enqueue_task(rq, p, enq_flags, sticky_cpu); } @@ -803,6 +806,26 @@ static void dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags ops_dequeue(p, deq_flags); + /* + * A currently running task which is going off @rq first gets dequeued + * and then stops running. 
As we want running <-> stopping transitions + * to be contained within runnable <-> quiescent transitions, trigger + * ->stopping() early here instead of in put_prev_task_scx(). + * + * @p may go through multiple stopping <-> running transitions between + * here and put_prev_task_scx() if task attribute changes occur while + * balance_scx() leaves @rq unlocked. However, they don't contain any + * information meaningful to the BPF scheduler and can be suppressed by + * skipping the callbacks if the task is !QUEUED. + */ + if (SCX_HAS_OP(stopping) && task_current(rq, p)) { + update_curr_scx(rq); + scx_ops.stopping(p, false); + } + + if (SCX_HAS_OP(quiescent)) + scx_ops.quiescent(p, deq_flags); + if (deq_flags & SCX_DEQ_SLEEP) p->scx.flags |= SCX_TASK_DEQD_FOR_SLEEP; else @@ -1328,6 +1351,10 @@ static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first) p->se.exec_start = rq_clock_task(rq); + /* see dequeue_task_scx() on why we skip when !QUEUED */ + if (SCX_HAS_OP(running) && (p->scx.flags & SCX_TASK_QUEUED)) + scx_ops.running(p); + watchdog_unwatch_task(p, true); } @@ -1366,6 +1393,10 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p) update_curr_scx(rq); + /* see dequeue_task_scx() on why we skip when !QUEUED */ + if (SCX_HAS_OP(stopping) && (p->scx.flags & SCX_TASK_QUEUED)) + scx_ops.stopping(p, true); + /* * If we're being called from put_prev_task_balance(), balance_scx() may * have decided that @p should keep running. From patchwork Sat Jan 28 00:16:31 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119575 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 7CEE4C38142 for ; Sat, 28 Jan 2023 00:19:26 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S233285AbjA1ATY (ORCPT ); Fri, 27 Jan 2023 19:19:24 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:37224 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232949AbjA1ASR (ORCPT ); Fri, 27 Jan 2023 19:18:17 -0500 Received: from mail-pg1-x52b.google.com (mail-pg1-x52b.google.com [IPv6:2607:f8b0:4864:20::52b]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 8C1808CC65; Fri, 27 Jan 2023 16:17:39 -0800 (PST) Received: by mail-pg1-x52b.google.com with SMTP id v3so4242597pgh.4; Fri, 27 Jan 2023 16:17:39 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=ATY0h3e+Tvo49K82Mus00K850n6y5PY12xFIXhveTmI=; b=Nh47i8VGfvLXOx1xnjBhgTsefAJKOy2PqOBKWOkswMC6HZNgsW5QntKEZtM0HqWwU8 nfdKlAVyFwuCIowuzIJf8GglVzFOlXruzRyfnw1OzcJ8FYJBIkxGYbChciWYC2FOIJ5q PQtVX3Ly37r0zrWfVFCjbJPzgmwUyk/ux8rDxbT62aMXJTpuoqzYnPoWCkRh1hb27h77 s8BeChuDST+Wx3nrj0T7MFFOzK2aNuqDfX64F7WDGAoc+zk4Dg/B8ZSukB3AqFxi0tYY CgQOUwm4hn/FXXSZm2RpmGGgW/O/PoHMLwVy1/pUiJr60OvVjAa9bHxofeyhQAxapOHs NDPw== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; 
bh=ATY0h3e+Tvo49K82Mus00K850n6y5PY12xFIXhveTmI=; b=rMC5sfIPKook3xXZGXFZUpB3SdQn4xz65mL6nsITFyv1pKWizhc8/y1PmQc546SEF4 ggERglUMdnO3leJvrf9uvwhpibrjzr3PtDkxGH45ik/ACji9fzwxGxLdwOyc9lqECjVf EKFZpoxwtjfSD9AXmeuLl7n6j8Psql74vMWvXEQgy8jNqHrWyzZdF05a2tHxvRx84xRE /iRpUsR5WzhZhmEv5wuxsK06qSJqjwoVmnJQMWBwXh/wb9JIf2Xz/gAl0D2M1vWWX0Yy eNLO14+YPUIfNvqNb07/uVeKpA0B3KkFvswS2O1pW1O1yQvg3EbIT+gJZ55cwBaByUwM S7Sg== X-Gm-Message-State: AO0yUKUu3p4j+25m6GbJLRkYXTi49FMYZd51bc3gv+r2pXDOiG1K6uUo 6udxS9bVHMgb5RV805LQlf0= X-Google-Smtp-Source: AK7set8ynrPZutSSbtwfSExjYJXecypmwnEDilAq2BCbBgsK3fM0V4XEflwh62hSVAPwbNTuq8uTZw== X-Received: by 2002:a62:e505:0:b0:582:c408:3ed1 with SMTP id n5-20020a62e505000000b00582c4083ed1mr244852pff.32.1674865052987; Fri, 27 Jan 2023 16:17:32 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id k81-20020a628454000000b00589a7824703sm3159565pfd.194.2023.01.27.16.17.32 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:32 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 22/30] sched_ext: Implement tickless support Date: Fri, 27 Jan 2023 14:16:31 -1000 Message-Id: <20230128001639.3510083-23-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org Allow BPF schedulers to indicate tickless operation by setting p->scx.slice to SCX_SLICE_INF. A CPU whose current task has infinite slice goes into tickless operation. scx_example_central is updated to use tickless operations for all tasks and instead use a BPF timer to expire slices. This also uses the SCX_ENQ_PREEMPT and task state tracking added by the previous patches. Currently, there is no way to pin the timer on the central CPU, so it may end up on one of the worker CPUs; however, outside of that, the worker CPUs can go tickless both while running sched_ext tasks and idling. With schbench running, scx_example_central shows: root@test ~# grep ^LOC /proc/interrupts; sleep 10; grep ^LOC /proc/interrupts LOC: 142024 656 664 449 Local timer interrupts LOC: 161663 663 665 449 Local timer interrupts Without it: root@test ~ [SIGINT]# grep ^LOC /proc/interrupts; sleep 10; grep ^LOC /proc/interrupts LOC: 188778 3142 3793 3993 Local timer interrupts LOC: 198993 5314 6323 6438 Local timer interrupts While scx_example_central itself is too barebone to be useful as a production scheduler, a more featureful central scheduler can be built using the same approach. Google's experience shows that such an approach can have significant benefits for certain applications such as VM hosting.
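For illustration (not part of this patch), the tickless pattern boils down to two pieces on the BPF side: dispatch with an infinite slice so the kernel may stop the tick, and arm a BPF timer that periodically preempts CPUs since slices no longer expire on their own. The sketch below is a stripped-down variant of what scx_example_central does; nr_cpu_ids, TIMER_INTERVAL_NS and the timer map plumbing are assumed to exist as in that example, and the sketch_* names are made up.

/* dispatch everything with an infinite slice; the tick can then be stopped */
void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
{
        scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_INF, enq_flags);
}

/* kick one CPU; used as a bpf_loop() callback from the timer below */
static int sketch_kick_one(u32 idx, void *data)
{
        scx_bpf_kick_cpu(idx, SCX_KICK_PREEMPT);
        return 0;
}

/* with SCX_SLICE_INF nothing expires by itself, so preempt periodically */
static int sketch_timerfn(void *map, int *key, struct bpf_timer *timer)
{
        bpf_loop(nr_cpu_ids, sketch_kick_one, NULL, 0);
        bpf_timer_start(timer, TIMER_INTERVAL_NS, 0);
        return 0;
}

A real scheduler would, like scx_example_central below, only kick CPUs whose current task has already run for its nominal slice instead of preempting everything each period.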
Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- include/linux/sched/ext.h | 1 + kernel/sched/core.c | 9 +- kernel/sched/ext.c | 43 +++++++- kernel/sched/ext.h | 2 + kernel/sched/sched.h | 6 ++ tools/sched_ext/scx_example_central.bpf.c | 118 ++++++++++++++++++++-- tools/sched_ext/scx_example_central.c | 3 +- 7 files changed, 170 insertions(+), 12 deletions(-) diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 338b41cd79fa..8f19ee7e5433 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -19,6 +19,7 @@ enum scx_consts { SCX_EXIT_MSG_LEN = 1024, SCX_SLICE_DFL = 20 * NSEC_PER_MSEC, + SCX_SLICE_INF = U64_MAX, /* infinite, implies nohz */ }; /* diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 5b68b822312b..d2419bd4fb5e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1200,13 +1200,16 @@ bool sched_can_stop_tick(struct rq *rq) return true; /* - * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left; - * if there's more than one we need the tick for involuntary - * preemption. + * If there are no DL,RR/FIFO tasks, there must only be CFS or SCX tasks + * left. For CFS, if there's more than one we need the tick for + * involuntary preemption. For SCX, ask. */ if (!scx_switched_all() && rq->nr_running > 1) return false; + if (scx_enabled() && !scx_can_stop_tick(rq)) + return false; + return true; } #endif /* CONFIG_NO_HZ_FULL */ diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 4acaf39ea879..5e94f9604e23 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -447,7 +447,8 @@ static void update_curr_scx(struct rq *rq) account_group_exec_runtime(curr, delta_exec); cgroup_account_cputime(curr, delta_exec); - curr->scx.slice -= min(curr->scx.slice, delta_exec); + if (curr->scx.slice != SCX_SLICE_INF) + curr->scx.slice -= min(curr->scx.slice, delta_exec); } static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p, @@ -1356,6 +1357,20 @@ static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first) scx_ops.running(p); watchdog_unwatch_task(p, true); + + /* + * @p is getting newly scheduled or got kicked after someone updated its + * slice. Refresh whether tick can be stopped. See can_stop_tick_scx(). + */ + if ((p->scx.slice == SCX_SLICE_INF) != + (bool)(rq->scx.flags & SCX_RQ_CAN_STOP_TICK)) { + if (p->scx.slice == SCX_SLICE_INF) + rq->scx.flags |= SCX_RQ_CAN_STOP_TICK; + else + rq->scx.flags &= ~SCX_RQ_CAN_STOP_TICK; + + sched_update_tick_dependency(rq); + } } static void put_prev_task_scx(struct rq *rq, struct task_struct *p) @@ -1891,6 +1906,26 @@ int scx_check_setscheduler(struct task_struct *p, int policy) return 0; } +#ifdef CONFIG_NO_HZ_FULL +bool scx_can_stop_tick(struct rq *rq) +{ + struct task_struct *p = rq->curr; + + if (scx_ops_disabling()) + return false; + + if (p->sched_class != &ext_sched_class) + return true; + + /* + * @rq can dispatch from different DSQs, so we can't tell whether it + * needs the tick or not by looking at nr_running. Allow stopping ticks + * iff the BPF scheduler indicated so. See set_next_task_scx(). 
+ */ + return rq->scx.flags & SCX_RQ_CAN_STOP_TICK; +} +#endif + /* * Omitted operations: * @@ -2051,7 +2086,7 @@ static void scx_ops_disable_workfn(struct kthread_work *work) struct rhashtable_iter rht_iter; struct scx_dispatch_q *dsq; const char *reason; - int i, type; + int i, cpu, type; type = atomic_read(&scx_exit_type); while (true) { @@ -2148,6 +2183,10 @@ static void scx_ops_disable_workfn(struct kthread_work *work) scx_task_iter_exit(&sti); spin_unlock_irq(&scx_tasks_lock); + /* kick all CPUs to restore ticks */ + for_each_possible_cpu(cpu) + resched_cpu(cpu); + forward_progress_guaranteed: /* * Here, every runnable task is guaranteed to make forward progress and diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index 0b04626e8ca2..9c9284f91e38 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -82,6 +82,7 @@ int scx_fork(struct task_struct *p); void scx_post_fork(struct task_struct *p); void scx_cancel_fork(struct task_struct *p); int scx_check_setscheduler(struct task_struct *p, int policy); +bool scx_can_stop_tick(struct rq *rq); void init_sched_ext_class(void); __printf(2, 3) void scx_ops_error_type(enum scx_exit_type type, @@ -141,6 +142,7 @@ static inline void scx_post_fork(struct task_struct *p) {} static inline void scx_cancel_fork(struct task_struct *p) {} static inline int scx_check_setscheduler(struct task_struct *p, int policy) { return 0; } +static inline bool scx_can_stop_tick(struct rq *rq) { return true; } static inline void init_sched_ext_class(void) {} static inline void scx_notify_sched_tick(void) {} diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 3c16caecd3a5..2447af22783c 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -685,11 +685,17 @@ struct cfs_rq { }; #ifdef CONFIG_SCHED_CLASS_EXT +/* scx_rq->flags, protected by the rq lock */ +enum scx_rq_flags { + SCX_RQ_CAN_STOP_TICK = 1 << 0, +}; + struct scx_rq { struct scx_dispatch_q local_dsq; struct list_head watchdog_list; u64 ops_qseq; u32 nr_running; + u32 flags; cpumask_var_t cpus_to_kick; cpumask_var_t cpus_to_preempt; struct irq_work kick_cpus_irq_work; diff --git a/tools/sched_ext/scx_example_central.bpf.c b/tools/sched_ext/scx_example_central.bpf.c index 6b246bf308d0..376486674ddb 100644 --- a/tools/sched_ext/scx_example_central.bpf.c +++ b/tools/sched_ext/scx_example_central.bpf.c @@ -13,7 +13,26 @@ * through per-CPU BPF queues. The current design is chosen to maximally * utilize and verify various scx mechanisms such as LOCAL_ON dispatching. * - * b. Preemption + * b. Tickless operation + * + * All tasks are dispatched with the infinite slice which allows stopping the + * ticks on CONFIG_NO_HZ_FULL kernels running with the proper nohz_full + * parameter. The tickless operation can be observed through + * /proc/interrupts. + * + * Periodic switching is enforced by a periodic timer checking all CPUs and + * preempting them as necessary. Unfortunately, BPF timer currently doesn't + * have a way to pin to a specific CPU, so the periodic timer isn't pinned to + * the central CPU. + * + * c. Preemption + * + * Kthreads are unconditionally queued to the head of a matching local dsq + * and dispatched with SCX_DSQ_PREEMPT. This ensures that a kthread is always + * prioritized over user threads, which is required for ensuring forward + * progress as e.g. the periodic timer may run on a ksoftirqd and if the + * ksoftirqd gets starved by a user thread, there may not be anything else to + * vacate that user thread. 
* * SCX_KICK_PREEMPT is used to trigger scheduling and CPUs to move to the * next tasks. @@ -38,7 +57,7 @@ const volatile s32 central_cpu; const volatile u32 nr_cpu_ids; u64 nr_total, nr_locals, nr_queued, nr_lost_pids; -u64 nr_dispatches, nr_mismatches, nr_retries; +u64 nr_timers, nr_dispatches, nr_mismatches, nr_retries; u64 nr_overflows; struct user_exit_info uei; @@ -51,6 +70,7 @@ struct { /* can't use percpu map due to bad lookups */ static bool cpu_gimme_task[MAX_CPUS]; +static u64 cpu_started_at[MAX_CPUS]; struct central_timer { struct bpf_timer timer; @@ -86,9 +106,22 @@ void BPF_STRUCT_OPS(central_enqueue, struct task_struct *p, u64 enq_flags) __sync_fetch_and_add(&nr_total, 1); + /* + * Push per-cpu kthreads at the head of local dsq's and preempt the + * corresponding CPU. This ensures that e.g. ksoftirqd isn't blocked + * behind other threads which is necessary for forward progress + * guarantee as we depend on the BPF timer which may run from ksoftirqd. + */ + if ((p->flags & PF_KTHREAD) && p->nr_cpus_allowed == 1) { + __sync_fetch_and_add(&nr_locals, 1); + scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_INF, + enq_flags | SCX_ENQ_PREEMPT); + return; + } + if (bpf_map_push_elem(¢ral_q, &pid, 0)) { __sync_fetch_and_add(&nr_overflows, 1); - scx_bpf_dispatch(p, FALLBACK_DSQ_ID, SCX_SLICE_DFL, enq_flags); + scx_bpf_dispatch(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, enq_flags); return; } @@ -122,13 +155,13 @@ static int dispatch_a_task_loopfn(u32 idx, void *data) */ if (!bpf_cpumask_test_cpu(cpu, p->cpus_ptr)) { __sync_fetch_and_add(&nr_mismatches, 1); - scx_bpf_dispatch(p, FALLBACK_DSQ_ID, SCX_SLICE_DFL, 0); + scx_bpf_dispatch(p, FALLBACK_DSQ_ID, SCX_SLICE_INF, 0); bpf_task_release(p); return 0; } /* dispatch to the local and mark that @cpu doesn't need more tasks */ - scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_DFL, 0); + scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_INF, 0); if (cpu != central_cpu) scx_bpf_kick_cpu(cpu, 0); @@ -200,12 +233,83 @@ void BPF_STRUCT_OPS(central_dispatch, s32 cpu, struct task_struct *prev) } } +void BPF_STRUCT_OPS(central_running, struct task_struct *p) +{ + s32 cpu = scx_bpf_task_cpu(p); + u64 *started_at = MEMBER_VPTR(cpu_started_at, [cpu]); + if (started_at) + *started_at = bpf_ktime_get_ns() ?: 1; /* 0 indicates idle */ +} + +void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable) +{ + s32 cpu = scx_bpf_task_cpu(p); + u64 *started_at = MEMBER_VPTR(cpu_started_at, [cpu]); + if (started_at) + *started_at = 0; +} + +static int kick_cpus_loopfn(u32 idx, void *data) +{ + s32 cpu = (nr_timers + idx) % nr_cpu_ids; + u64 *nr_to_kick = data; + u64 now = bpf_ktime_get_ns(); + u64 *started_at; + s32 pid; + + if (cpu == central_cpu) + goto kick; + + /* kick iff there's something pending */ + if (scx_bpf_dsq_nr_queued(FALLBACK_DSQ_ID) || + scx_bpf_dsq_nr_queued(SCX_DSQ_LOCAL_ON | cpu)) + ; + else if (*nr_to_kick) + (*nr_to_kick)--; + else + return 0; + + /* and the current one exhausted its slice */ + started_at = MEMBER_VPTR(cpu_started_at, [cpu]); + if (started_at && *started_at && + vtime_before(now, *started_at + SCX_SLICE_DFL)) + return 0; +kick: + scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT); + return 0; +} + +static int central_timerfn(void *map, int *key, struct bpf_timer *timer) +{ + u64 nr_to_kick = nr_queued; + + bpf_loop(nr_cpu_ids, kick_cpus_loopfn, &nr_to_kick, 0); + bpf_timer_start(timer, TIMER_INTERVAL_NS, 0); + __sync_fetch_and_add(&nr_timers, 1); + return 0; +} + int BPF_STRUCT_OPS_SLEEPABLE(central_init) { + u32 key = 
0; + struct bpf_timer *timer; + int ret; + if (switch_all) scx_bpf_switch_all(); - return scx_bpf_create_dsq(FALLBACK_DSQ_ID, -1); + ret = scx_bpf_create_dsq(FALLBACK_DSQ_ID, -1); + if (ret) + return ret; + + timer = bpf_map_lookup_elem(¢ral_timer, &key); + if (!timer) + return -ESRCH; + + bpf_timer_init(timer, ¢ral_timer, CLOCK_MONOTONIC); + bpf_timer_set_callback(timer, central_timerfn); + ret = bpf_timer_start(timer, TIMER_INTERVAL_NS, 0); + return ret; } void BPF_STRUCT_OPS(central_exit, struct scx_exit_info *ei) @@ -225,6 +329,8 @@ struct sched_ext_ops central_ops = { .select_cpu = (void *)central_select_cpu, .enqueue = (void *)central_enqueue, .dispatch = (void *)central_dispatch, + .running = (void *)central_running, + .stopping = (void *)central_stopping, .init = (void *)central_init, .exit = (void *)central_exit, .name = "central", diff --git a/tools/sched_ext/scx_example_central.c b/tools/sched_ext/scx_example_central.c index 14f6598c03df..fc01e5149371 100644 --- a/tools/sched_ext/scx_example_central.c +++ b/tools/sched_ext/scx_example_central.c @@ -76,7 +76,8 @@ int main(int argc, char **argv) skel->bss->nr_locals, skel->bss->nr_queued, skel->bss->nr_lost_pids); - printf(" dispatch:%10lu mismatch:%10lu retry:%10lu\n", + printf("timer :%10lu dispatch:%10lu mismatch:%10lu retry:%10lu\n", + skel->bss->nr_timers, skel->bss->nr_dispatches, skel->bss->nr_mismatches, skel->bss->nr_retries); From patchwork Sat Jan 28 00:16:32 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119579 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 84DCDC61DA7 for ; Sat, 28 Jan 2023 00:19:31 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S233369AbjA1AT3 (ORCPT ); Fri, 27 Jan 2023 19:19:29 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:37376 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232371AbjA1ASV (ORCPT ); Fri, 27 Jan 2023 19:18:21 -0500 Received: from mail-pj1-x1032.google.com (mail-pj1-x1032.google.com [IPv6:2607:f8b0:4864:20::1032]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 30D3C7CC88; Fri, 27 Jan 2023 16:17:40 -0800 (PST) Received: by mail-pj1-x1032.google.com with SMTP id w6-20020a17090ac98600b0022c58cc7a18so1748078pjt.1; Fri, 27 Jan 2023 16:17:40 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=JGbgpvSpMzmYHXNuuc53c+70J+sIZFP/ksryl3J37AY=; b=juF20Qe3Flopw/poOy1yfHR0G9Ne1zffhWSW38b4L9uC7k33al9fz5FyU9V7FLn7EP Bnem6nhSAq5eKdX2ciTtZmqDSQJ00xZrDKdPfQGxMw96TqtYm56mkz+8NI9OKtfunfmm 9iWmsE1UcPovlUooS32xjAy4i4CtdYh1YyhihlEH9LAeiJrN/RNdRspF67IDqBz6TMyY nVaE1bSNdz83wKYqk/hw37bk9je31Us6W2DxSGpw8hNnxBXhGKDrjz9+hN2mhAh5nObT 9SvvEZFJnb+0KrAbmMAT0+Imwxd18ffda/+9kGiEPjtnlz9yLZ61c5vNmCkS0BJYkVI+ hbIA== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=JGbgpvSpMzmYHXNuuc53c+70J+sIZFP/ksryl3J37AY=; 
b=GirlLVm+BuInhJv0Kro/D7uuuAs0rFoe4pXAzISaOphOCHP1dzyIjE4RcbP4k86oqZ 9e01cJVpMYieaBOi1Xewo02j2sxnSmP31CzfqXFCqv+pRDUULePOlhM6/Eo7ydSVaYDN vSuowblaSu6F07CyjiWEwfX8bY/8ODetsRrw8reriJ4vyLYOoEKPoI3RbpprpkiswXSw cB9IT4H0HFqkFN1j6m2oqDKQCeVeoZYAzJmk1gdjcmvc6oVIBLIu684vg+A818GM3BVn rwYxf+b+4k3M4E3jOdqJnLtbqyOKyBup5quyTuqebrHK8kw6aeum1DNC+WwpSszPJYoz 6n1A== X-Gm-Message-State: AFqh2kqAEC/rf/0pljMrGXkwBTFWMeYQnaI1mXJpx+XdXMJdML3oqvDB s0k9TAD8fGsemz/0MhPmVbY= X-Google-Smtp-Source: AMrXdXtNzQivU3XT0Ai4vuUfHy4PYVQjsEBmni9do/nfftcnuRCwmFBUw4E/7fbNljCb8gO3sjsH/Q== X-Received: by 2002:a17:902:834c:b0:194:bea4:57d2 with SMTP id z12-20020a170902834c00b00194bea457d2mr33723697pln.46.1674865054932; Fri, 27 Jan 2023 16:17:34 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id s15-20020a170902ea0f00b001949c0d7a33sm69640plg.7.2023.01.27.16.17.34 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:34 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 23/30] sched_ext: Add cgroup support Date: Fri, 27 Jan 2023 14:16:32 -1000 Message-Id: <20230128001639.3510083-24-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org Add sched_ext_ops operations to init/exit cgroups, and track task migrations and config changes. Because different BPF schedulers may implement different subsets of CPU control features, allow BPF schedulers to pick which cgroup interface files to enable using SCX_OPS_CGROUP_KNOB_* flags. For now, only the weight knobs are supported but adding more should be straightforward. While a BPF scheduler is being enabled and disabled, relevant cgroup operations are locked out using scx_cgroup_rwsem. This avoids situations like task prep taking place while the task is being moved across cgroups, making things easier for BPF schedulers. This patch also adds scx_example_pair which implements a variant of core scheduling where a hyperthread pair only run tasks from the same cgroup. The BPF scheduler achieves this by putting tasks into per-cgroup queues, time-slicing the cgroup to run for each pair first, and then scheduling within the cgroup. See the header comment in scx_example_pair.bpf.c for more details. Note that scx_example_pair's cgroup-boundary guarantee breaks down for tasks running in higher priority scheduler classes. This will be addressed by a followup patch which implements a mechanism to track CPU preemption. 
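For illustration (not part of this patch), the cgroup callbacks seen from the BPF side can be as simple as mirroring each cgroup's weight into a map that the enqueue/dispatch paths consult. The sketch below uses the cgrp->kn->id convention that scx_example_pair below also uses to identify cgroups; the map, its size and the sketch_* names are made up.

/* cgroup ID -> latest weight reported through the cgroup callbacks */
struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 4096);
        __type(key, u64);               /* cgroup ID */
        __type(value, u32);             /* cpu.weight, [1..10000] */
} cgrp_weight SEC(".maps");

s32 BPF_STRUCT_OPS(sketch_cgroup_init, struct cgroup *cgrp,
                   struct scx_cgroup_init_args *args)
{
        u64 cgid = cgrp->kn->id;
        u32 weight = args->weight;

        /* remember the initial weight of the newly covered cgroup */
        return bpf_map_update_elem(&cgrp_weight, &cgid, &weight, BPF_ANY);
}

void BPF_STRUCT_OPS(sketch_cgroup_exit, struct cgroup *cgrp)
{
        u64 cgid = cgrp->kn->id;

        bpf_map_delete_elem(&cgrp_weight, &cgid);
}

void BPF_STRUCT_OPS(sketch_cgroup_set_weight, struct cgroup *cgrp, u32 weight)
{
        u64 cgid = cgrp->kn->id;

        bpf_map_update_elem(&cgrp_weight, &cgid, &weight, BPF_ANY);
}

Because such a scheduler only cares about cpu.weight, it would also set SCX_OPS_CGROUP_KNOB_WEIGHT in its ops flags so that only the weight interface files are exposed while it is loaded.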
Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- include/linux/sched/ext.h | 96 ++++- init/Kconfig | 5 + kernel/sched/core.c | 68 ++- kernel/sched/ext.c | 341 ++++++++++++++- kernel/sched/ext.h | 23 + kernel/sched/sched.h | 12 +- tools/sched_ext/.gitignore | 1 + tools/sched_ext/Makefile | 9 +- tools/sched_ext/scx_example_pair.bpf.c | 555 +++++++++++++++++++++++++ tools/sched_ext/scx_example_pair.c | 143 +++++++ tools/sched_ext/scx_example_pair.h | 10 + 11 files changed, 1239 insertions(+), 24 deletions(-) create mode 100644 tools/sched_ext/scx_example_pair.bpf.c create mode 100644 tools/sched_ext/scx_example_pair.c create mode 100644 tools/sched_ext/scx_example_pair.h diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 8f19ee7e5433..11d6902e717d 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -12,6 +12,8 @@ #include #include +struct cgroup; + enum scx_consts { SCX_OPS_NAME_LEN = 128, SCX_EXIT_REASON_LEN = 128, @@ -109,14 +111,27 @@ enum scx_ops_flags { */ SCX_OPS_ENQ_EXITING = 1LLU << 2, + /* + * CPU cgroup knob enable flags + */ + SCX_OPS_CGROUP_KNOB_WEIGHT = 1LLU << 16, /* cpu.weight */ + SCX_OPS_ALL_FLAGS = SCX_OPS_KEEP_BUILTIN_IDLE | SCX_OPS_ENQ_LAST | - SCX_OPS_ENQ_EXITING, + SCX_OPS_ENQ_EXITING | + SCX_OPS_CGROUP_KNOB_WEIGHT, }; /* argument container for ops.enable() and friends */ struct scx_enable_args { - /* empty for now */ + /* the cgroup the task is joining */ + struct cgroup *cgroup; +}; + +/* argument container for ops->cgroup_init() */ +struct scx_cgroup_init_args { + /* the weight of the cgroup [1..10000] */ + u32 weight; }; /** @@ -325,7 +340,8 @@ struct sched_ext_ops { * @p: task to enable BPF scheduling for * @args: enable arguments, see the struct definition * - * Enable @p for BPF scheduling. @p will start running soon. + * Enable @p for BPF scheduling. @p is now in the cgroup specified for + * the preceding prep_enable() and will start running soon. */ void (*enable)(struct task_struct *p, struct scx_enable_args *args); @@ -349,6 +365,77 @@ struct sched_ext_ops { */ void (*disable)(struct task_struct *p); + /** + * cgroup_init - Initialize a cgroup + * @cgrp: cgroup being initialized + * @args: init arguments, see the struct definition + * + * Either the BPF scheduler is being loaded or @cgrp created, initialize + * @cgrp for sched_ext. This operation may block. + * + * Return 0 for success, -errno for failure. An error return while + * loading will abort loading of the BPF scheduler. During cgroup + * creation, it will abort the specific cgroup creation. + */ + s32 (*cgroup_init)(struct cgroup *cgrp, + struct scx_cgroup_init_args *args); + + /** + * cgroup_exit - Exit a cgroup + * @cgrp: cgroup being exited + * + * Either the BPF scheduler is being unloaded or @cgrp destroyed, exit + * @cgrp for sched_ext. This operation may block. + */ + void (*cgroup_exit)(struct cgroup *cgrp); + + /** + * cgroup_prep_move - Prepare a task to be moved to a different cgroup + * @p: task being moved + * @from: cgroup @p is being moved from + * @to: cgroup @p is being moved to + * + * Prepare @p for move from cgroup @from to @to. This operation may + * block and can be used for allocations. + * + * Return 0 for success, -errno for failure. An error return aborts the + * migration.
+ */ + s32 (*cgroup_prep_move)(struct task_struct *p, + struct cgroup *from, struct cgroup *to); + + /** + * cgroup_move - Commit cgroup move + * @p: task being moved + * @from: cgroup @p is being moved from + * @to: cgroup @p is being moved to + * + * Commit the move. @p is dequeued during this operation. + */ + void (*cgroup_move)(struct task_struct *p, + struct cgroup *from, struct cgroup *to); + + /** + * cgroup_cancel_move - Cancel cgroup move + * @p: task whose cgroup move is being canceled + * @from: cgroup @p was being moved from + * @to: cgroup @p was being moved to + * + * @p was cgroup_prep_move()'d but failed before reaching cgroup_move(). + * Undo the preparation. + */ + void (*cgroup_cancel_move)(struct task_struct *p, + struct cgroup *from, struct cgroup *to); + + /** + * cgroup_set_weight - A cgroup's weight is being changed + * @cgrp: cgroup whose weight is being updated + * @weight: new weight [1..10000] + * + * Update @tg's weight to @weight. + */ + void (*cgroup_set_weight)(struct cgroup *cgrp, u32 weight); + /* * All online ops must come before ops.init(). */ @@ -483,6 +570,9 @@ struct sched_ext_entity { /* cold fields */ struct list_head tasks_node; +#ifdef CONFIG_EXT_GROUP_SCHED + struct cgroup *cgrp_moving_from; +#endif }; void sched_ext_free(struct task_struct *p); diff --git a/init/Kconfig b/init/Kconfig index c93f19dd872e..13faaed9dc97 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1039,6 +1039,11 @@ config RT_GROUP_SCHED realtime bandwidth for them. See Documentation/scheduler/sched-rt-group.rst for more information. +config EXT_GROUP_SCHED + bool + depends on SCHED_CLASS_EXT && CGROUP_SCHED + default y + endif #CGROUP_SCHED config UCLAMP_TASK_GROUP diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d2419bd4fb5e..a2a241bc24e7 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -9786,6 +9786,9 @@ void __init sched_init(void) root_task_group.shares = ROOT_TASK_GROUP_LOAD; init_cfs_bandwidth(&root_task_group.cfs_bandwidth); #endif /* CONFIG_FAIR_GROUP_SCHED */ +#ifdef CONFIG_EXT_GROUP_SCHED + root_task_group.scx_weight = CGROUP_WEIGHT_DFL; +#endif /* CONFIG_EXT_GROUP_SCHED */ #ifdef CONFIG_RT_GROUP_SCHED root_task_group.rt_se = (struct sched_rt_entity **)ptr; ptr += nr_cpu_ids * sizeof(void **); @@ -10242,6 +10245,7 @@ struct task_group *sched_create_group(struct task_group *parent) if (!alloc_rt_sched_group(tg, parent)) goto err; + scx_group_set_weight(tg, CGROUP_WEIGHT_DFL); alloc_uclamp_sched_group(tg, parent); return tg; @@ -10345,6 +10349,7 @@ void sched_move_task(struct task_struct *tsk) SCHED_CHANGE_BLOCK(rq, tsk, DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK) { sched_change_group(tsk); + scx_move_task(tsk); } /* @@ -10381,6 +10386,11 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) { struct task_group *tg = css_tg(css); struct task_group *parent = css_tg(css->parent); + int ret; + + ret = scx_tg_online(tg); + if (ret) + return ret; if (parent) sched_online_group(tg, parent); @@ -10397,6 +10407,13 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) return 0; } +static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css) +{ + struct task_group *tg = css_tg(css); + + scx_tg_offline(tg); +} + static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) { struct task_group *tg = css_tg(css); @@ -10414,9 +10431,10 @@ static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) sched_unregister_group(tg); } -#ifdef CONFIG_RT_GROUP_SCHED +#if defined(CONFIG_RT_GROUP_SCHED) || 
defined(CONFIG_EXT_GROUP_SCHED) static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) { +#ifdef CONFIG_RT_GROUP_SCHED struct task_struct *task; struct cgroup_subsys_state *css; @@ -10424,7 +10442,8 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) if (!sched_rt_can_attach(css_tg(css), task)) return -EINVAL; } - return 0; +#endif + return scx_cgroup_can_attach(tset); } #endif @@ -10435,7 +10454,16 @@ static void cpu_cgroup_attach(struct cgroup_taskset *tset) cgroup_taskset_for_each(task, css, tset) sched_move_task(task); + + scx_cgroup_finish_attach(); +} + +#ifdef CONFIG_EXT_GROUP_SCHED +static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset) +{ + scx_cgroup_cancel_attach(tset); } +#endif #ifdef CONFIG_UCLAMP_TASK_GROUP static void cpu_util_update_eff(struct cgroup_subsys_state *css) @@ -10618,9 +10646,15 @@ static int cpu_uclamp_max_show(struct seq_file *sf, void *v) static int cpu_shares_write_u64(struct cgroup_subsys_state *css, struct cftype *cftype, u64 shareval) { + int ret; + if (shareval > scale_load_down(ULONG_MAX)) shareval = MAX_SHARES; - return sched_group_set_shares(css_tg(css), scale_load(shareval)); + ret = sched_group_set_shares(css_tg(css), scale_load(shareval)); + if (!ret) + scx_group_set_weight(css_tg(css), + sched_weight_to_cgroup(shareval)); + return ret; } static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, @@ -11084,11 +11118,15 @@ static int cpu_extra_stat_show(struct seq_file *sf, return 0; } -#ifdef CONFIG_FAIR_GROUP_SCHED +#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_EXT_GROUP_SCHED) static unsigned long tg_weight(struct task_group *tg) { +#ifdef CONFIG_FAIR_GROUP_SCHED return scale_load_down(tg->shares); +#else + return sched_weight_from_cgroup(tg->cgrp_weight); +#endif } static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css, @@ -11101,13 +11139,17 @@ static int cpu_weight_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, u64 cgrp_weight) { unsigned long weight; + int ret; if (cgrp_weight < CGROUP_WEIGHT_MIN || cgrp_weight > CGROUP_WEIGHT_MAX) return -ERANGE; weight = sched_weight_from_cgroup(cgrp_weight); - return sched_group_set_shares(css_tg(css), scale_load(weight)); + ret = sched_group_set_shares(css_tg(css), scale_load(weight)); + if (!ret) + scx_group_set_weight(css_tg(css), cgrp_weight); + return ret; } static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css, @@ -11132,7 +11174,7 @@ static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, s64 nice) { unsigned long weight; - int idx; + int idx, ret; if (nice < MIN_NICE || nice > MAX_NICE) return -ERANGE; @@ -11141,7 +11183,11 @@ static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, idx = array_index_nospec(idx, 40); weight = sched_prio_to_weight[idx]; - return sched_group_set_shares(css_tg(css), scale_load(weight)); + ret = sched_group_set_shares(css_tg(css), scale_load(weight)); + if (!ret) + scx_group_set_weight(css_tg(css), + sched_weight_to_cgroup(weight)); + return ret; } #endif @@ -11203,7 +11249,7 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of, #endif struct cftype cpu_cftypes[CPU_CFTYPE_CNT + 1] = { -#ifdef CONFIG_FAIR_GROUP_SCHED +#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_EXT_GROUP_SCHED) [CPU_CFTYPE_WEIGHT] = { .name = "weight", .flags = CFTYPE_NOT_ON_ROOT, @@ -11257,13 +11303,17 @@ struct cftype cpu_cftypes[CPU_CFTYPE_CNT + 1] = { struct cgroup_subsys cpu_cgrp_subsys = { .css_alloc = cpu_cgroup_css_alloc, .css_online = 
cpu_cgroup_css_online, + .css_offline = cpu_cgroup_css_offline, .css_released = cpu_cgroup_css_released, .css_free = cpu_cgroup_css_free, .css_extra_stat_show = cpu_extra_stat_show, -#ifdef CONFIG_RT_GROUP_SCHED +#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_EXT_GROUP_SCHED) .can_attach = cpu_cgroup_can_attach, #endif .attach = cpu_cgroup_attach, +#ifdef CONFIG_EXT_GROUP_SCHED + .cancel_attach = cpu_cgroup_cancel_attach, +#endif .legacy_cftypes = cpu_legacy_cftypes, .dfl_cftypes = cpu_cftypes, .early_init = true, diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 5e94f9604e23..31e48cff9be6 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -1710,6 +1710,19 @@ static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued) resched_curr(rq); } +static struct cgroup *tg_cgrp(struct task_group *tg) +{ + /* + * If CGROUP_SCHED is disabled, @tg is NULL. If @tg is an autogroup, + * @tg->css.cgroup is NULL. In both cases, @tg can be treated as the + * root cgroup. + */ + if (tg && tg->css.cgroup) + return tg->css.cgroup; + else + return &cgrp_dfl_root.cgrp; +} + static int scx_ops_prepare_task(struct task_struct *p, struct task_group *tg) { int ret; @@ -1719,7 +1732,7 @@ static int scx_ops_prepare_task(struct task_struct *p, struct task_group *tg) p->scx.disallow = false; if (SCX_HAS_OP(prep_enable)) { - struct scx_enable_args args = { }; + struct scx_enable_args args = { .cgroup = tg_cgrp(tg) }; ret = SCX_CALL_OP_RET(SCX_KF_SLEEPABLE, prep_enable, p, &args); if (unlikely(ret)) { @@ -1759,7 +1772,8 @@ static void scx_ops_enable_task(struct task_struct *p) WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_OPS_PREPPED)); if (SCX_HAS_OP(enable)) { - struct scx_enable_args args = { }; + struct scx_enable_args args = + { .cgroup = tg_cgrp(p->sched_task_group) }; scx_ops.enable(p, &args); } p->scx.flags &= ~SCX_TASK_OPS_PREPPED; @@ -1772,7 +1786,8 @@ static void scx_ops_disable_task(struct task_struct *p) if (p->scx.flags & SCX_TASK_OPS_PREPPED) { if (SCX_HAS_OP(cancel_enable)) { - struct scx_enable_args args = { }; + struct scx_enable_args args = + { .cgroup = tg_cgrp(task_group(p)) }; scx_ops.cancel_enable(p, &args); } p->scx.flags &= ~SCX_TASK_OPS_PREPPED; @@ -1926,6 +1941,165 @@ bool scx_can_stop_tick(struct rq *rq) } #endif +#ifdef CONFIG_EXT_GROUP_SCHED + +DEFINE_STATIC_PERCPU_RWSEM(scx_cgroup_rwsem); + +int scx_tg_online(struct task_group *tg) +{ + int ret = 0; + + WARN_ON_ONCE(tg->scx_flags & (SCX_TG_ONLINE | SCX_TG_INITED)); + + percpu_down_read(&scx_cgroup_rwsem); + + if (SCX_HAS_OP(cgroup_init)) { + struct scx_cgroup_init_args args = { .weight = tg->scx_weight }; + + ret = SCX_CALL_OP_RET(SCX_KF_SLEEPABLE, cgroup_init, + tg->css.cgroup, &args); + if (!ret) + tg->scx_flags |= SCX_TG_ONLINE | SCX_TG_INITED; + else + ret = ops_sanitize_err("cgroup_init", ret); + } else { + tg->scx_flags |= SCX_TG_ONLINE; + } + + percpu_up_read(&scx_cgroup_rwsem); + return ret; +} + +void scx_tg_offline(struct task_group *tg) +{ + WARN_ON_ONCE(!(tg->scx_flags & SCX_TG_ONLINE)); + + percpu_down_read(&scx_cgroup_rwsem); + + if (SCX_HAS_OP(cgroup_exit) && (tg->scx_flags & SCX_TG_INITED)) + scx_ops.cgroup_exit(tg->css.cgroup); + tg->scx_flags &= ~(SCX_TG_ONLINE | SCX_TG_INITED); + + percpu_up_read(&scx_cgroup_rwsem); +} + +int scx_cgroup_can_attach(struct cgroup_taskset *tset) +{ + struct cgroup_subsys_state *css; + struct task_struct *p; + int ret; + + /* released in scx_finish/cancel_attach() */ + percpu_down_read(&scx_cgroup_rwsem); + + if (!scx_enabled()) + return 0; + + 
cgroup_taskset_for_each(p, css, tset) { + struct cgroup *from = tg_cgrp(p->sched_task_group); + + if (SCX_HAS_OP(cgroup_prep_move)) { + ret = SCX_CALL_OP_RET(SCX_KF_SLEEPABLE, cgroup_prep_move, + p, from, css->cgroup); + if (ret) + goto err; + } + + WARN_ON_ONCE(p->scx.cgrp_moving_from); + p->scx.cgrp_moving_from = from; + } + + return 0; + +err: + cgroup_taskset_for_each(p, css, tset) { + if (!p->scx.cgrp_moving_from) + break; + if (SCX_HAS_OP(cgroup_cancel_move)) + scx_ops.cgroup_cancel_move(p, p->scx.cgrp_moving_from, + css->cgroup); + p->scx.cgrp_moving_from = NULL; + } + + percpu_up_read(&scx_cgroup_rwsem); + return ops_sanitize_err("cgroup_prep_move", ret); +} + +void scx_move_task(struct task_struct *p) +{ + /* + * We're called from sched_move_task() which handles both cgroup and + * autogroup moves. Ignore the latter. + */ + if (task_group_is_autogroup(p->sched_task_group)) + return; + + if (!scx_enabled()) + return; + + if (SCX_HAS_OP(cgroup_move)) { + WARN_ON_ONCE(!p->scx.cgrp_moving_from); + scx_ops.cgroup_move(p, p->scx.cgrp_moving_from, + tg_cgrp(p->sched_task_group)); + } + p->scx.cgrp_moving_from = NULL; +} + +void scx_cgroup_finish_attach(void) +{ + percpu_up_read(&scx_cgroup_rwsem); +} + +void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) +{ + struct cgroup_subsys_state *css; + struct task_struct *p; + + if (!scx_enabled()) + goto out_unlock; + + cgroup_taskset_for_each(p, css, tset) { + if (SCX_HAS_OP(cgroup_cancel_move)) { + WARN_ON_ONCE(!p->scx.cgrp_moving_from); + scx_ops.cgroup_cancel_move(p, p->scx.cgrp_moving_from, + css->cgroup); + } + p->scx.cgrp_moving_from = NULL; + } +out_unlock: + percpu_up_read(&scx_cgroup_rwsem); +} + +void scx_group_set_weight(struct task_group *tg, unsigned long weight) +{ + percpu_down_read(&scx_cgroup_rwsem); + + if (tg->scx_weight != weight) { + if (SCX_HAS_OP(cgroup_set_weight)) + scx_ops.cgroup_set_weight(tg_cgrp(tg), weight); + tg->scx_weight = weight; + } + + percpu_up_read(&scx_cgroup_rwsem); +} + +static void scx_cgroup_lock(void) +{ + percpu_down_write(&scx_cgroup_rwsem); +} + +static void scx_cgroup_unlock(void) +{ + percpu_up_write(&scx_cgroup_rwsem); +} + +#else /* CONFIG_EXT_GROUP_SCHED */ + +static inline void scx_cgroup_lock(void) {} +static inline void scx_cgroup_unlock(void) {} + +#endif /* CONFIG_EXT_GROUP_SCHED */ + /* * Omitted operations: * @@ -2055,6 +2229,131 @@ static void destroy_dsq(u64 dsq_id) rcu_read_unlock(); } +#ifdef CONFIG_EXT_GROUP_SCHED +static void scx_cgroup_exit(void) +{ + struct cgroup_subsys_state *css; + + percpu_rwsem_assert_held(&scx_cgroup_rwsem); + + /* + * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk + * cgroups and exit all the inited ones, all online cgroups are exited. + */ + rcu_read_lock(); + css_for_each_descendant_post(css, &root_task_group.css) { + struct task_group *tg = css_tg(css); + + if (!(tg->scx_flags & SCX_TG_INITED)) + continue; + tg->scx_flags &= ~SCX_TG_INITED; + + if (!scx_ops.cgroup_exit) + continue; + + if (WARN_ON_ONCE(!css_tryget(css))) + continue; + rcu_read_unlock(); + + scx_ops.cgroup_exit(css->cgroup); + + rcu_read_lock(); + css_put(css); + } + rcu_read_unlock(); +} + +static int scx_cgroup_init(void) +{ + struct cgroup_subsys_state *css; + int ret; + + percpu_rwsem_assert_held(&scx_cgroup_rwsem); + + /* + * scx_tg_on/offline() are excluded through scx_cgroup_rwsem. If we walk + * cgroups and init, all online cgroups are initialized.
+ */ + rcu_read_lock(); + css_for_each_descendant_pre(css, &root_task_group.css) { + struct task_group *tg = css_tg(css); + struct scx_cgroup_init_args args = { .weight = tg->scx_weight }; + + if ((tg->scx_flags & + (SCX_TG_ONLINE | SCX_TG_INITED)) != SCX_TG_ONLINE) + continue; + + if (!scx_ops.cgroup_init) { + tg->scx_flags |= SCX_TG_INITED; + continue; + } + + if (WARN_ON_ONCE(!css_tryget(css))) + continue; + rcu_read_unlock(); + + ret = SCX_CALL_OP_RET(SCX_KF_SLEEPABLE, cgroup_init, + css->cgroup, &args); + if (ret) { + css_put(css); + return ret; + } + tg->scx_flags |= SCX_TG_INITED; + + rcu_read_lock(); + css_put(css); + } + rcu_read_unlock(); + + return 0; +} + +static void scx_cgroup_config_knobs(void) +{ + static DEFINE_MUTEX(cgintf_mutex); + DECLARE_BITMAP(mask, CPU_CFTYPE_CNT) = { }; + u64 knob_flags; + int i; + + /* + * Called from both class switch and ops enable/disable paths, + * synchronize internally. + */ + mutex_lock(&cgintf_mutex); + + /* if fair is in use, all knobs should be shown */ + if (!scx_switched_all()) { + bitmap_fill(mask, CPU_CFTYPE_CNT); + goto apply; + } + + /* + * On ext, only show the supported knobs. Otherwise, show all possible + * knobs so that configuration attempts succeed and the states are + * remembered while ops is not loaded. + */ + if (scx_enabled()) + knob_flags = scx_ops.flags; + else + knob_flags = SCX_OPS_ALL_FLAGS; + + if (knob_flags & SCX_OPS_CGROUP_KNOB_WEIGHT) { + __set_bit(CPU_CFTYPE_WEIGHT, mask); + __set_bit(CPU_CFTYPE_WEIGHT_NICE, mask); + } +apply: + for (i = 0; i < CPU_CFTYPE_CNT; i++) + cgroup_show_cftype(&cpu_cftypes[i], test_bit(i, mask)); + + mutex_unlock(&cgintf_mutex); +} + +#else +static void scx_cgroup_exit(void) {} +static int scx_cgroup_init(void) { return 0; } +static void scx_cgroup_config_knobs(void) {} +#endif + /* * Used by sched_fork() and __setscheduler_prio() to pick the matching * sched_class. dl/rt are already handled. @@ -2198,9 +2497,10 @@ static void scx_ops_disable_workfn(struct kthread_work *work) static_branch_disable(&__scx_switched_all); WRITE_ONCE(scx_switching_all, false); - /* avoid racing against fork */ + /* avoid racing against fork and cgroup changes */ cpus_read_lock(); percpu_down_write(&scx_fork_rwsem); + scx_cgroup_lock(); spin_lock_irq(&scx_tasks_lock); scx_task_iter_init(&sti); @@ -2237,6 +2537,9 @@ static void scx_ops_disable_workfn(struct kthread_work *work) static_branch_disable_cpuslocked(&scx_builtin_idle_enabled); synchronize_rcu(); + scx_cgroup_exit(); + + scx_cgroup_unlock(); percpu_up_write(&scx_fork_rwsem); cpus_read_unlock(); @@ -2275,6 +2578,8 @@ static void scx_ops_disable_workfn(struct kthread_work *work) WARN_ON_ONCE(scx_ops_set_enable_state(SCX_OPS_DISABLED) != SCX_OPS_DISABLING); + + scx_cgroup_config_knobs(); } static DEFINE_KTHREAD_WORK(scx_ops_disable_work, scx_ops_disable_workfn); @@ -2420,10 +2725,11 @@ static int scx_ops_enable(struct sched_ext_ops *ops) scx_watchdog_timeout / 2); /* - * Lock out forks before opening the floodgate so that they don't wander - * into the operations prematurely. + * Lock out forks, cgroup on/offlining and moves before opening the + * floodgate so that they don't wander into the operations prematurely. */ percpu_down_write(&scx_fork_rwsem); + scx_cgroup_lock(); for (i = 0; i < SCX_NR_ONLINE_OPS; i++) if (((void (**)(void))ops)[i]) @@ -2442,6 +2748,14 @@ static int scx_ops_enable(struct sched_ext_ops *ops) static_branch_disable_cpuslocked(&scx_builtin_idle_enabled); } + /* + * All cgroups should be initialized before letting in tasks. 
cgroup + * on/offlining and task migrations are already locked out. + */ + ret = scx_cgroup_init(); + if (ret) + goto err_disable_unlock; + static_branch_enable_cpuslocked(&__scx_ops_enabled); /* @@ -2524,6 +2838,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops) spin_unlock_irq(&scx_tasks_lock); preempt_enable(); + scx_cgroup_unlock(); percpu_up_write(&scx_fork_rwsem); if (!scx_ops_tryset_enable_state(SCX_OPS_ENABLED, SCX_OPS_ENABLING)) { @@ -2537,6 +2852,8 @@ static int scx_ops_enable(struct sched_ext_ops *ops) cpus_read_unlock(); mutex_unlock(&scx_ops_enable_mutex); + scx_cgroup_config_knobs(); + return 0; err_unlock: @@ -2544,6 +2861,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops) return ret; err_disable_unlock: + scx_cgroup_unlock(); percpu_up_write(&scx_fork_rwsem); err_disable: cpus_read_unlock(); @@ -2707,6 +3025,9 @@ static int bpf_scx_check_member(const struct btf_type *t, switch (moff) { case offsetof(struct sched_ext_ops, prep_enable): + case offsetof(struct sched_ext_ops, cgroup_init): + case offsetof(struct sched_ext_ops, cgroup_exit): + case offsetof(struct sched_ext_ops, cgroup_prep_move): case offsetof(struct sched_ext_ops, init): case offsetof(struct sched_ext_ops, exit): break; @@ -2805,7 +3126,8 @@ void __init init_sched_ext_class(void) * definitions so that BPF scheduler implementations can use them * through the generated vmlinux.h. */ - WRITE_ONCE(v, SCX_WAKE_EXEC | SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP); + WRITE_ONCE(v, SCX_WAKE_EXEC | SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | + SCX_TG_ONLINE); BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params)); init_dsq(&scx_dsq_global, SCX_DSQ_GLOBAL); @@ -2826,6 +3148,7 @@ void __init init_sched_ext_class(void) register_sysrq_key('S', &sysrq_sched_ext_reset_op); INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn); + scx_cgroup_config_knobs(); } @@ -2869,8 +3192,8 @@ static const struct btf_kfunc_id_set scx_kfunc_set_init = { * @dsq_id: DSQ to create * @node: NUMA node to allocate from * - * Create a custom DSQ identified by @dsq_id. Can be called from ops.init() and - * ops.prep_enable(). + * Create a custom DSQ identified by @dsq_id. Can be called from ops.init(), + * ops.prep_enable(), ops.cgroup_init() and ops.cgroup_prep_move(). 
*/ s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) { diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index 9c9284f91e38..9a60b81d787e 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -59,6 +59,11 @@ enum scx_deq_flags { SCX_DEQ_SLEEP = DEQUEUE_SLEEP, }; +enum scx_tg_flags { + SCX_TG_ONLINE = 1U << 0, + SCX_TG_INITED = 1U << 1, +}; + enum scx_kick_flags { SCX_KICK_PREEMPT = 1LLU << 0, /* force scheduling on the CPU */ }; @@ -162,3 +167,21 @@ static inline void scx_update_idle(struct rq *rq, bool idle) #else static inline void scx_update_idle(struct rq *rq, bool idle) {} #endif + +#ifdef CONFIG_EXT_GROUP_SCHED +int scx_tg_online(struct task_group *tg); +void scx_tg_offline(struct task_group *tg); +int scx_cgroup_can_attach(struct cgroup_taskset *tset); +void scx_move_task(struct task_struct *p); +void scx_cgroup_finish_attach(void); +void scx_cgroup_cancel_attach(struct cgroup_taskset *tset); +void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight); +#else /* CONFIG_EXT_GROUP_SCHED */ +static inline int scx_tg_online(struct task_group *tg) { return 0; } +static inline void scx_tg_offline(struct task_group *tg) {} +static inline int scx_cgroup_can_attach(struct cgroup_taskset *tset) { return 0; } +static inline void scx_move_task(struct task_struct *p) {} +static inline void scx_cgroup_finish_attach(void) {} +static inline void scx_cgroup_cancel_attach(struct cgroup_taskset *tset) {} +static inline void scx_group_set_weight(struct task_group *tg, unsigned long cgrp_weight) {} +#endif /* CONFIG_EXT_GROUP_SCHED */ diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 2447af22783c..abb8ec22b6ef 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -406,6 +406,11 @@ struct task_group { struct rt_bandwidth rt_bandwidth; #endif +#ifdef CONFIG_EXT_GROUP_SCHED + u32 scx_flags; /* SCX_TG_* */ + u32 scx_weight; +#endif + struct rcu_head rcu; struct list_head list; @@ -530,6 +535,11 @@ extern void set_task_rq_fair(struct sched_entity *se, static inline void set_task_rq_fair(struct sched_entity *se, struct cfs_rq *prev, struct cfs_rq *next) { } #endif /* CONFIG_SMP */ +#else /* CONFIG_FAIR_GROUP_SCHED */ +static inline int sched_group_set_shares(struct task_group *tg, unsigned long shares) +{ + return 0; +} #endif /* CONFIG_FAIR_GROUP_SCHED */ #else /* CONFIG_CGROUP_SCHED */ @@ -3372,7 +3382,7 @@ static inline void update_current_exec_runtime(struct task_struct *curr, #ifdef CONFIG_CGROUP_SCHED enum cpu_cftype_id { -#ifdef CONFIG_FAIR_GROUP_SCHED +#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_EXT_GROUP_SCHED) CPU_CFTYPE_WEIGHT, CPU_CFTYPE_WEIGHT_NICE, CPU_CFTYPE_IDLE, diff --git a/tools/sched_ext/.gitignore b/tools/sched_ext/.gitignore index 389f0e5b0970..ebc34dcf925b 100644 --- a/tools/sched_ext/.gitignore +++ b/tools/sched_ext/.gitignore @@ -1,6 +1,7 @@ scx_example_dummy scx_example_qmap scx_example_central +scx_example_pair *.skel.h *.subskel.h /tools/ diff --git a/tools/sched_ext/Makefile b/tools/sched_ext/Makefile index c6c3669c47b9..2303736698a2 100644 --- a/tools/sched_ext/Makefile +++ b/tools/sched_ext/Makefile @@ -115,7 +115,7 @@ BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) \ -Wno-compare-distinct-pointer-types \ -O2 -mcpu=v3 -all: scx_example_dummy scx_example_qmap scx_example_central +all: scx_example_dummy scx_example_qmap scx_example_central scx_example_pair # sort removes libbpf duplicates when not cross-building MAKE_DIRS := $(sort $(BUILD_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \ @@ -178,10 +178,15 @@ scx_example_central: 
scx_example_central.c scx_example_central.skel.h user_exit_ $(CC) $(CFLAGS) -c $< -o $@.o $(CC) -o $@ $@.o $(HOST_BPFOBJ) $(LDFLAGS) +scx_example_pair: scx_example_pair.c scx_example_pair.skel.h user_exit_info.h + $(CC) $(CFLAGS) -c $< -o $@.o + $(CC) -o $@ $@.o $(HOST_BPFOBJ) $(LDFLAGS) + clean: rm -rf $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) rm -f *.o *.bpf.o *.skel.h *.subskel.h - rm -f scx_example_dummy scx_example_qmap scx_example_central + rm -f scx_example_dummy scx_example_qmap scx_example_central \ + scx_example_pair .PHONY: all clean diff --git a/tools/sched_ext/scx_example_pair.bpf.c b/tools/sched_ext/scx_example_pair.bpf.c new file mode 100644 index 000000000000..8e277225b044 --- /dev/null +++ b/tools/sched_ext/scx_example_pair.bpf.c @@ -0,0 +1,555 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * A demo sched_ext core-scheduler which always makes every sibling CPU pair + * execute from the same CPU cgroup. + * + * Each CPU in the system is paired with exactly one other CPU, according to a + * "stride" value that can be specified when the BPF scheduler program is first + * loaded. Throughout the runtime of the scheduler, these CPU pairs guarantee + * that they will only ever schedule tasks that belong to the same CPU cgroup. + * + * Scheduler Initialization + * ------------------------ + * + * The scheduler BPF program is first initialized from user space, before it is + * enabled. During this initialization process, each CPU on the system is + * assigned several values that are constant throughout its runtime: + * + * 1. *Pair CPU*: The CPU that it synchronizes with when making scheduling + * decisions. Paired CPUs always schedule tasks from the same + * CPU cgroup, and synchronize with each other to guarantee + * that this constraint is not violated. + * 2. *Pair ID*: Each CPU pair is assigned a Pair ID, which is used to access + * a struct pair_ctx object that is shared between the pair. + * 3. *In-pair-index*: An index, 0 or 1, that is assigned to each core in the + * pair. Each struct pair_ctx has an active_mask field, + * which is a bitmap used to indicate whether each core + * in the pair currently has an actively running task. + * This index specifies which entry in the bitmap corresponds + * to each CPU in the pair. + * + * During this initialization, the CPUs are paired according to a "stride" that + * may be specified when invoking the user space program that initializes and + * loads the scheduler. By default, the stride is 1/2 the total number of CPUs. + * + * Tasks and cgroups + * ----------------- + * + * Every cgroup in the system is registered with the scheduler using the + * pair_cgroup_init() callback, and every task in the system is associated with + * exactly one cgroup. At a high level, the idea with the pair scheduler is to + * always schedule tasks from the same cgroup within a given CPU pair. When a + * task is enqueued (i.e. passed to the pair_enqueue() callback function), its + * cgroup ID is read from its task struct, and then a corresponding queue map + * is used to FIFO-enqueue the task for that cgroup. + * + * If you look through the implementation of the scheduler, you'll notice that + * there is quite a bit of complexity involved with looking up the per-cgroup + * FIFO queue that we enqueue tasks in. For example, there is a cgrp_q_idx_hash + * BPF hash map that is used to map a cgroup ID to a globally unique ID that's + * allocated in the BPF program. 
This is done because we use separate maps to + * store the FIFO queue of tasks, and the length of that map, per cgroup. This + * complexity is only present because of current deficiencies in BPF that will + * soon be addressed. The main point to keep in mind is that newly enqueued + * tasks are added to their cgroup's FIFO queue. + * + * Dispatching tasks + * ----------------- + * + * This section will describe how enqueued tasks are dispatched and scheduled. + * Tasks are dispatched in pair_dispatch(), and at a high level the workflow is + * as follows: + * + * 1. Fetch the struct pair_ctx for the current CPU. As mentioned above, this is + * the structure that's used to synchronize amongst the two pair CPUs in their + * scheduling decisions. After any of the following events have occurred: + * + * - The cgroup's slice run has expired, or + * - The cgroup becomes empty, or + * - Either CPU in the pair is preempted by a higher priority scheduling class + * + * The cgroup transitions to the draining state and stops executing new tasks + * from the cgroup. + * + * 2. If the pair is still executing a task, mark the pair_ctx as draining, and + * wait for the pair CPU to be preempted. + * + * 3. Otherwise, if the pair CPU is not running a task, we can move onto + * scheduling new tasks. Pop the next cgroup id from the top_q queue. + * + * 4. Pop a task from that cgroup's FIFO task queue, and begin executing it. + * + * Note again that this scheduling behavior is simple, but the implementation + * is complex mostly because this it hits several BPF shortcomings and has to + * work around in often awkward ways. Most of the shortcomings are expected to + * be resolved in the near future which should allow greatly simplifying this + * scheduler. + * + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#include "scx_common.bpf.h" +#include "scx_example_pair.h" + +char _license[] SEC("license") = "GPL"; + +const volatile bool switch_all; + +const volatile u32 nr_cpu_ids; + +/* a pair of CPUs stay on a cgroup for this duration */ +const volatile u32 pair_batch_dur_ns = SCX_SLICE_DFL; + +/* cpu ID -> pair cpu ID */ +const volatile s32 pair_cpu[MAX_CPUS] = { [0 ... MAX_CPUS - 1] = -1 }; + +/* cpu ID -> pair_id */ +const volatile u32 pair_id[MAX_CPUS]; + +/* CPU ID -> CPU # in the pair (0 or 1) */ +const volatile u32 in_pair_idx[MAX_CPUS]; + +struct pair_ctx { + struct bpf_spin_lock lock; + + /* the cgroup the pair is currently executing */ + u64 cgid; + + /* the pair started executing the current cgroup at */ + u64 started_at; + + /* whether the current cgroup is draining */ + bool draining; + + /* the CPUs that are currently active on the cgroup */ + u32 active_mask; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, MAX_CPUS / 2); + __type(key, u32); + __type(value, struct pair_ctx); +} pair_ctx SEC(".maps"); + +/* queue of cgrp_q's possibly with tasks on them */ +struct { + __uint(type, BPF_MAP_TYPE_QUEUE); + /* + * Because it's difficult to build strong synchronization encompassing + * multiple non-trivial operations in BPF, this queue is managed in an + * opportunistic way so that we guarantee that a cgroup w/ active tasks + * is always on it but possibly multiple times. Once we have more robust + * synchronization constructs and e.g. linked list, we should be able to + * do this in a prettier way but for now just size it big enough. 
+ */ + __uint(max_entries, 4 * MAX_CGRPS); + __type(value, u64); +} top_q SEC(".maps"); + +/* per-cgroup q which FIFOs the tasks from the cgroup */ +struct cgrp_q { + __uint(type, BPF_MAP_TYPE_QUEUE); + __uint(max_entries, MAX_QUEUED); + __type(value, u32); +}; + +/* + * Ideally, we want to allocate cgrp_q and cgrq_q_len in the cgroup local + * storage; however, a cgroup local storage can only be accessed from the BPF + * progs attached to the cgroup. For now, work around by allocating array of + * cgrp_q's and then allocating per-cgroup indices. + * + * Another caveat: It's difficult to populate a large array of maps statically + * or from BPF. Initialize it from userland. + */ +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, MAX_CGRPS); + __type(key, s32); + __array(values, struct cgrp_q); +} cgrp_q_arr SEC(".maps"); + +static u64 cgrp_q_len[MAX_CGRPS]; + +/* + * This and cgrp_q_idx_hash combine into a poor man's IDR. This likely would be + * useful to have as a map type. + */ +static u32 cgrp_q_idx_cursor; +static u64 cgrp_q_idx_busy[MAX_CGRPS]; + +/* + * All added up, the following is what we do: + * + * 1. When a cgroup is enabled, RR cgroup_q_idx_busy array doing cmpxchg looking + * for a free ID. If not found, fail cgroup creation with -EBUSY. + * + * 2. Hash the cgroup ID to the allocated cgrp_q_idx in the following + * cgrp_q_idx_hash. + * + * 3. Whenever a cgrp_q needs to be accessed, first look up the cgrp_q_idx from + * cgrp_q_idx_hash and then access the corresponding entry in cgrp_q_arr. + * + * This is sadly complicated for something pretty simple. Hopefully, we should + * be able to simplify in the future. + */ +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, MAX_CGRPS); + __uint(key_size, sizeof(u64)); /* cgrp ID */ + __uint(value_size, sizeof(s32)); /* cgrp_q idx */ +} cgrp_q_idx_hash SEC(".maps"); + +/* statistics */ +u64 nr_total, nr_dispatched, nr_missing, nr_kicks, nr_preemptions; +u64 nr_exps, nr_exp_waits, nr_exp_empty; +u64 nr_cgrp_next, nr_cgrp_coll, nr_cgrp_empty; + +struct user_exit_info uei; + +static bool time_before(u64 a, u64 b) +{ + return (s64)(a - b) < 0; +} + +void BPF_STRUCT_OPS(pair_enqueue, struct task_struct *p, u64 enq_flags) +{ + s32 pid = p->pid; + u64 cgid = p->sched_task_group->css.cgroup->kn->id; + u32 *q_idx; + struct cgrp_q *cgq; + u64 *cgq_len; + + __sync_fetch_and_add(&nr_total, 1); + + /* find the cgroup's q and push @p into it */ + q_idx = bpf_map_lookup_elem(&cgrp_q_idx_hash, &cgid); + if (!q_idx) { + scx_bpf_error("failed to lookup q_idx for cgroup[%llu]", cgid); + return; + } + + cgq = bpf_map_lookup_elem(&cgrp_q_arr, q_idx); + if (!cgq) { + scx_bpf_error("failed to lookup q_arr for cgroup[%llu] q_idx[%u]", + cgid, *q_idx); + return; + } + + if (bpf_map_push_elem(cgq, &pid, 0)) { + scx_bpf_error("cgroup[%llu] queue overflow", cgid); + return; + } + + /* bump q len, if going 0 -> 1, queue cgroup into the top_q */ + cgq_len = MEMBER_VPTR(cgrp_q_len, [*q_idx]); + if (!cgq_len) { + scx_bpf_error("MEMBER_VTPR malfunction"); + return; + } + + if (!__sync_fetch_and_add(cgq_len, 1) && + bpf_map_push_elem(&top_q, &cgid, 0)) { + scx_bpf_error("top_q overflow"); + return; + } +} + +/* find the next cgroup to execute and return it in *data */ +static int next_cgid_loopfn(u32 idx, void *data) +{ + u64 cgid; + u32 *q_idx; + u64 *cgq_len; + + if (bpf_map_pop_elem(&top_q, &cgid)) + return 1; + + q_idx = bpf_map_lookup_elem(&cgrp_q_idx_hash, &cgid); + if (!q_idx) + return 0; + + /* this is the only place 
where empty cgroups are taken off the top_q */ + cgq_len = MEMBER_VPTR(cgrp_q_len, [*q_idx]); + if (!cgq_len || !*cgq_len) + return 0; + + /* if it has any tasks, requeue as we may race and not execute it */ + bpf_map_push_elem(&top_q, &cgid, 0); + *(u64 *)data = cgid; + return 1; +} + +struct claim_task_loopctx { + u32 q_idx; + bool claimed; +}; + +/* claim one task from the specified cgq */ +static int claim_task_loopfn(u32 idx, void *data) +{ + struct claim_task_loopctx *claimc = data; + u64 *cgq_len; + u64 len; + + cgq_len = MEMBER_VPTR(cgrp_q_len, [claimc->q_idx]); + if (!cgq_len) + return 1; + + len = *cgq_len; + if (!len) + return 1; + + if (__sync_val_compare_and_swap(cgq_len, len, len - 1) != len) + return 0; + + claimc->claimed = true; + return 1; +} + +static int lookup_pairc_and_mask(s32 cpu, struct pair_ctx **pairc, u32 *mask) +{ + u32 *vptr, in_pair_mask; + int err; + + vptr = (u32 *)MEMBER_VPTR(pair_id, [cpu]); + if (!vptr) + return -EINVAL; + + *pairc = bpf_map_lookup_elem(&pair_ctx, vptr); + if (!(*pairc)) + return -EINVAL; + + vptr = (u32 *)MEMBER_VPTR(in_pair_idx, [cpu]); + if (!vptr) + return -EINVAL; + + *mask = 1U << *vptr; + + return 0; +} + +static int dispatch_loopfn(u32 idx, void *data) +{ + s32 cpu = *(s32 *)data; + struct pair_ctx *pairc; + struct bpf_map *cgq_map; + struct claim_task_loopctx claimc; + struct task_struct *p; + u64 now = bpf_ktime_get_ns(); + bool kick_pair = false; + bool expired; + u32 *vptr, in_pair_mask; + s32 pid; + u64 cgid; + int ret; + + ret = lookup_pairc_and_mask(cpu, &pairc, &in_pair_mask); + if (ret) { + scx_bpf_error("failed to lookup pairc and in_pair_mask for cpu[%d]", + cpu); + return 1; + } + + bpf_spin_lock(&pairc->lock); + pairc->active_mask &= ~in_pair_mask; + + expired = time_before(pairc->started_at + pair_batch_dur_ns, now); + if (expired || pairc->draining) { + u64 new_cgid = 0; + + __sync_fetch_and_add(&nr_exps, 1); + + /* + * We're done with the current cgid. An obvious optimization + * would be not draining if the next cgroup is the current one. + * For now, be dumb and always expire. + */ + pairc->draining = true; + + if (pairc->active_mask) { + /* + * The other CPU is still active We want to wait until + * this cgroup expires. + * + * If the pair controls its CPU, and the time already + * expired, kick. When the other CPU arrives at + * dispatch and clears its active mask, it'll push the + * pair to the next cgroup and kick this CPU. + */ + __sync_fetch_and_add(&nr_exp_waits, 1); + bpf_spin_unlock(&pairc->lock); + if (expired) + kick_pair = true; + goto out_maybe_kick; + } + + bpf_spin_unlock(&pairc->lock); + + /* + * Pick the next cgroup. It'd be easier / cleaner to not drop + * pairc->lock and use stronger synchronization here especially + * given that we'll be switching cgroups significantly less + * frequently than tasks. Unfortunately, bpf_spin_lock can't + * really protect anything non-trivial. Let's do opportunistic + * operations instead. + */ + bpf_loop(1 << 23, next_cgid_loopfn, &new_cgid, 0); + /* no active cgroup, go idle */ + if (!new_cgid) { + __sync_fetch_and_add(&nr_exp_empty, 1); + return 1; + } + + bpf_spin_lock(&pairc->lock); + + /* + * The other CPU may already have started on a new cgroup while + * we dropped the lock. Make sure that we're still draining and + * start on the new cgroup. 
+ */ + if (pairc->draining && !pairc->active_mask) { + __sync_fetch_and_add(&nr_cgrp_next, 1); + pairc->cgid = new_cgid; + pairc->started_at = now; + pairc->draining = false; + kick_pair = true; + } else { + __sync_fetch_and_add(&nr_cgrp_coll, 1); + } + } + + cgid = pairc->cgid; + pairc->active_mask |= in_pair_mask; + bpf_spin_unlock(&pairc->lock); + + /* again, it'd be better to do all these with the lock held, oh well */ + vptr = bpf_map_lookup_elem(&cgrp_q_idx_hash, &cgid); + if (!vptr) { + scx_bpf_error("failed to lookup q_idx for cgroup[%llu]", cgid); + return 1; + } + + claimc = (struct claim_task_loopctx){ .q_idx = *vptr }; + bpf_loop(1 << 23, claim_task_loopfn, &claimc, 0); + if (!claimc.claimed) { + /* the cgroup must be empty, expire and repeat */ + __sync_fetch_and_add(&nr_cgrp_empty, 1); + bpf_spin_lock(&pairc->lock); + pairc->draining = true; + pairc->active_mask &= ~in_pair_mask; + bpf_spin_unlock(&pairc->lock); + return 0; + } + + cgq_map = bpf_map_lookup_elem(&cgrp_q_arr, &claimc.q_idx); + if (!cgq_map) { + scx_bpf_error("failed to lookup cgq_map for cgroup[%llu] q_idx[%d]", + cgid, claimc.q_idx); + return 1; + } + + if (bpf_map_pop_elem(cgq_map, &pid)) { + scx_bpf_error("cgq_map is empty for cgroup[%llu] q_idx[%d]", + cgid, claimc.q_idx); + return 1; + } + + p = bpf_task_from_pid(pid); + if (p) { + __sync_fetch_and_add(&nr_dispatched, 1); + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0); + bpf_task_release(p); + } else { + /* we don't handle dequeues, retry on lost tasks */ + __sync_fetch_and_add(&nr_missing, 1); + return 0; + } + +out_maybe_kick: + if (kick_pair) { + s32 *pair = (s32 *)MEMBER_VPTR(pair_cpu, [cpu]); + if (pair) { + __sync_fetch_and_add(&nr_kicks, 1); + scx_bpf_kick_cpu(*pair, SCX_KICK_PREEMPT); + } + } + return 1; +} + +void BPF_STRUCT_OPS(pair_dispatch, s32 cpu, struct task_struct *prev) +{ + s32 cpu_on_stack = cpu; + + bpf_loop(1 << 23, dispatch_loopfn, &cpu_on_stack, 0); +} + +static int alloc_cgrp_q_idx_loopfn(u32 idx, void *data) +{ + u32 q_idx; + + q_idx = __sync_fetch_and_add(&cgrp_q_idx_cursor, 1) % MAX_CGRPS; + if (!__sync_val_compare_and_swap(&cgrp_q_idx_busy[q_idx], 0, 1)) { + *(s32 *)data = q_idx; + return 1; + } + return 0; +} + +s32 BPF_STRUCT_OPS(pair_cgroup_init, struct cgroup *cgrp) +{ + u64 cgid = cgrp->kn->id; + s32 q_idx = -1; + + bpf_loop(MAX_CGRPS, alloc_cgrp_q_idx_loopfn, &q_idx, 0); + if (q_idx < 0) + return -EBUSY; + + if (bpf_map_update_elem(&cgrp_q_idx_hash, &cgid, &q_idx, BPF_ANY)) { + u64 *busy = MEMBER_VPTR(cgrp_q_idx_busy, [q_idx]); + if (busy) + *busy = 0; + return -EBUSY; + } + + return 0; +} + +void BPF_STRUCT_OPS(pair_cgroup_exit, struct cgroup *cgrp) +{ + u64 cgid = cgrp->kn->id; + s32 *q_idx; + + q_idx = bpf_map_lookup_elem(&cgrp_q_idx_hash, &cgid); + if (q_idx) { + u64 *busy = MEMBER_VPTR(cgrp_q_idx_busy, [*q_idx]); + if (busy) + *busy = 0; + bpf_map_delete_elem(&cgrp_q_idx_hash, &cgid); + } +} + +s32 BPF_STRUCT_OPS(pair_init) +{ + if (switch_all) + scx_bpf_switch_all(); + return 0; +} + +void BPF_STRUCT_OPS(pair_exit, struct scx_exit_info *ei) +{ + uei_record(&uei, ei); +} + +SEC(".struct_ops") +struct sched_ext_ops pair_ops = { + .enqueue = (void *)pair_enqueue, + .dispatch = (void *)pair_dispatch, + .cgroup_init = (void *)pair_cgroup_init, + .cgroup_exit = (void *)pair_cgroup_exit, + .init = (void *)pair_init, + .exit = (void *)pair_exit, + .name = "pair", +}; diff --git a/tools/sched_ext/scx_example_pair.c b/tools/sched_ext/scx_example_pair.c new file mode 100644 index 000000000000..255ea7b1235d --- 
/dev/null +++ b/tools/sched_ext/scx_example_pair.c @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include "user_exit_info.h" +#include "scx_example_pair.h" +#include "scx_example_pair.skel.h" + +const char help_fmt[] = +"A demo sched_ext core-scheduler which always makes every sibling CPU pair\n" +"execute from the same CPU cgroup.\n" +"\n" +"See the top-level comment in .bpf.c for more details.\n" +"\n" +"Usage: %s [-a] [-S STRIDE]\n" +"\n" +" -a Switch all tasks\n" +" -S STRIDE Override CPU pair stride (default: nr_cpus_ids / 2)\n" +" -h Display this help and exit\n"; + +static volatile int exit_req; + +static void sigint_handler(int dummy) +{ + exit_req = 1; +} + +int main(int argc, char **argv) +{ + struct scx_example_pair *skel; + struct bpf_link *link; + u64 seq = 0; + s32 stride, i, opt, outer_fd; + + signal(SIGINT, sigint_handler); + signal(SIGTERM, sigint_handler); + + libbpf_set_strict_mode(LIBBPF_STRICT_ALL); + + skel = scx_example_pair__open(); + assert(skel); + + skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus(); + + /* pair up the earlier half to the latter by default, override with -s */ + stride = skel->rodata->nr_cpu_ids / 2; + + while ((opt = getopt(argc, argv, "ahS:")) != -1) { + switch (opt) { + case 'a': + skel->rodata->switch_all = true; + break; + case 'S': + stride = strtoul(optarg, NULL, 0); + break; + default: + fprintf(stderr, help_fmt, basename(argv[0])); + return opt != 'h'; + } + } + + for (i = 0; i < skel->rodata->nr_cpu_ids; i++) { + if (skel->rodata->pair_cpu[i] < 0) { + skel->rodata->pair_cpu[i] = i + stride; + skel->rodata->pair_cpu[i + stride] = i; + skel->rodata->pair_id[i] = i; + skel->rodata->pair_id[i + stride] = i; + skel->rodata->in_pair_idx[i] = 0; + skel->rodata->in_pair_idx[i + stride] = 1; + } + } + + assert(!scx_example_pair__load(skel)); + + /* + * Populate the cgrp_q_arr map which is an array containing per-cgroup + * queues. It'd probably be better to do this from BPF but there are too + * many to initialize statically and there's no way to dynamically + * populate from BPF. + */ + outer_fd = bpf_map__fd(skel->maps.cgrp_q_arr); + assert(outer_fd >= 0); + + printf("Initializing"); + for (i = 0; i < MAX_CGRPS; i++) { + s32 inner_fd; + + if (exit_req) + break; + + inner_fd = bpf_map_create(BPF_MAP_TYPE_QUEUE, NULL, 0, + sizeof(u32), MAX_QUEUED, NULL); + assert(inner_fd >= 0); + assert(!bpf_map_update_elem(outer_fd, &i, &inner_fd, BPF_ANY)); + close(inner_fd); + + if (!(i % 10)) + printf("."); + fflush(stdout); + } + printf("\n"); + + /* + * Fully initialized, attach and run. 
+ */ + link = bpf_map__attach_struct_ops(skel->maps.pair_ops); + assert(link); + + while (!exit_req && !uei_exited(&skel->bss->uei)) { + printf("[SEQ %lu]\n", seq++); + printf(" total:%10lu dispatch:%10lu missing:%10lu\n", + skel->bss->nr_total, + skel->bss->nr_dispatched, + skel->bss->nr_missing); + printf(" kicks:%10lu preemptions:%7lu\n", + skel->bss->nr_kicks, + skel->bss->nr_preemptions); + printf(" exp:%10lu exp_wait:%10lu exp_empty:%10lu\n", + skel->bss->nr_exps, + skel->bss->nr_exp_waits, + skel->bss->nr_exp_empty); + printf("cgnext:%10lu cgcoll:%10lu cgempty:%10lu\n", + skel->bss->nr_cgrp_next, + skel->bss->nr_cgrp_coll, + skel->bss->nr_cgrp_empty); + fflush(stdout); + sleep(1); + } + + bpf_link__destroy(link); + uei_print(&skel->bss->uei); + scx_example_pair__destroy(skel); + return 0; +} diff --git a/tools/sched_ext/scx_example_pair.h b/tools/sched_ext/scx_example_pair.h new file mode 100644 index 000000000000..f60b824272f7 --- /dev/null +++ b/tools/sched_ext/scx_example_pair.h @@ -0,0 +1,10 @@ +#ifndef __SCX_EXAMPLE_PAIR_H +#define __SCX_EXAMPLE_PAIR_H + +enum { + MAX_CPUS = 4096, + MAX_QUEUED = 4096, + MAX_CGRPS = 4096, +}; + +#endif /* __SCX_EXAMPLE_PAIR_H */ From patchwork Sat Jan 28 00:16:33 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119578 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 3E45DC636CC for ; Sat, 28 Jan 2023 00:19:30 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S229762AbjA1AT2 (ORCPT ); Fri, 27 Jan 2023 19:19:28 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:36696 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S233133AbjA1ASS (ORCPT ); Fri, 27 Jan 2023 19:18:18 -0500 Received: from mail-pj1-x1034.google.com (mail-pj1-x1034.google.com [IPv6:2607:f8b0:4864:20::1034]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 089FE7DBD4; Fri, 27 Jan 2023 16:17:41 -0800 (PST) Received: by mail-pj1-x1034.google.com with SMTP id lp10so6129610pjb.4; Fri, 27 Jan 2023 16:17:40 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=rMQOIFEBcFwDp2btYCdWarnv+32LvEIs5gf0cXKu4U4=; b=k0feCFL4O4unl6y3z3K+xvN790aOc1J4WbsMP8/rFMHX6rXD7lYkSitIXg2IjM0IbM oDWOELrtkcuqYs8suWxc+fd5Hv9SlcT0YunNxepPEyycsJYERseFMOqa4YopxfV/whNj PpRrCb/Sfd1AEzzCxCANK6M8fOmrm4v4PfIKJ5ZBPEjuSBRxSCNWB/Tle/DLe/uqhT8z YSd1tYjbBYWU+te9jMwqv9e+0T7RGBlgMEgtGGoNcY67Cjl72mSduVwFve/8zqXSEpPO kjIqJ9QqKFkrslsqL3YstaqjOtjI6Gme7Ok5vQ8sKP6olrAik3a01FrqOCcVQVdvZoYO YHQA== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=rMQOIFEBcFwDp2btYCdWarnv+32LvEIs5gf0cXKu4U4=; b=iJKkGJoUvNXuQqJPLuBOtm2iHIHxz4VtgMiP45gWTBUxrFw4b6h2PdZ9NpvL7n5ebt JA7oRkVEiQukbKDuup3mJtmn6Y3uPApkXYadtbANFcWYn4ETehuiK6njifbvZuORdlUO BveTDOUu9aI1MtEkTDbUBfDRaZxeyJ6fe5olwI18pMpSQ6p7dinhkU6mORlPOksfeMrT bsSeZI2xK3/aobdwHNCD44ECnJlWbhxLyQQilYBV5PaVHXm1LoQmSIoWvoA0lN5D26DG 
5dTpcA3FgFtxnEgcX1hPDzOgv0kUuci2oXuFtHowZ3dZpimvNjYuqj925L+AMtYIO3c0 ZgeQ== X-Gm-Message-State: AO0yUKX5NGz2V4S7bDTDb7McyDqpvEF8euuLqgbx07+6WX9ZGHDFmOoe SIK7y6mRpXpxTxSBhhDgLPE= X-Google-Smtp-Source: AK7set+zyY1sVSAjfb6ZLW0s6pCeAipZeTMNNUeuZ5MUXvW8tYPVle6S6mnqNN4saJjMDsZouZl9Cg== X-Received: by 2002:a05:6a20:959b:b0:b8:7e6d:5b6a with SMTP id iu27-20020a056a20959b00b000b87e6d5b6amr8447222pzb.49.1674865056906; Fri, 27 Jan 2023 16:17:36 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id jm21-20020a17090304d500b00196519d8647sm2169486plb.4.2023.01.27.16.17.36 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:36 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 24/30] sched_ext: Implement SCX_KICK_WAIT Date: Fri, 27 Jan 2023 14:16:33 -1000 Message-Id: <20230128001639.3510083-25-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org From: David Vernet If set when calling scx_bpf_kick_cpu(), the invoking CPU will busy wait for the kicked cpu to enter the scheduler. This will be used to improve the exclusion guarantees in scx_example_pair. Signed-off-by: David Vernet Reviewed-by: Tejun Heo Signed-off-by: Tejun Heo Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- kernel/sched/core.c | 4 +++- kernel/sched/ext.c | 33 ++++++++++++++++++++++++++++++++- kernel/sched/ext.h | 20 ++++++++++++++++++++ kernel/sched/sched.h | 2 ++ 4 files changed, 57 insertions(+), 2 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index a2a241bc24e7..47334e428031 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5952,8 +5952,10 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) for_each_active_class(class) { p = class->pick_next_task(rq); - if (p) + if (p) { + scx_notify_pick_next_task(rq, p, class); return p; + } } BUG(); /* The idle class should always have a runnable task. */ diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 31e48cff9be6..d1de6a44c4f5 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -126,6 +126,9 @@ static struct { static bool __cacheline_aligned_in_smp scx_has_idle_cpus; #endif /* CONFIG_SMP */ +/* for %SCX_KICK_WAIT */ +static u64 __percpu *scx_kick_cpus_pnt_seqs; + /* * Direct dispatch marker. 
* @@ -3093,6 +3096,7 @@ static const struct sysrq_key_op sysrq_sched_ext_reset_op = { static void kick_cpus_irq_workfn(struct irq_work *irq_work) { struct rq *this_rq = this_rq(); + u64 *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs); int this_cpu = cpu_of(this_rq); int cpu; @@ -3106,14 +3110,32 @@ static void kick_cpus_irq_workfn(struct irq_work *irq_work) if (cpumask_test_cpu(cpu, this_rq->scx.cpus_to_preempt) && rq->curr->sched_class == &ext_sched_class) rq->curr->scx.slice = 0; + pseqs[cpu] = rq->scx.pnt_seq; resched_curr(rq); + } else { + cpumask_clear_cpu(cpu, this_rq->scx.cpus_to_wait); } raw_spin_rq_unlock_irqrestore(rq, flags); } + for_each_cpu_andnot(cpu, this_rq->scx.cpus_to_wait, + cpumask_of(this_cpu)) { + /* + * Pairs with smp_store_release() issued by this CPU in + * scx_notify_pick_next_task() on the resched path. + * + * We busy-wait here to guarantee that no other task can be + * scheduled on our core before the target CPU has entered the + * resched path. + */ + while (smp_load_acquire(&cpu_rq(cpu)->scx.pnt_seq) == pseqs[cpu]) + cpu_relax(); + } + cpumask_clear(this_rq->scx.cpus_to_kick); cpumask_clear(this_rq->scx.cpus_to_preempt); + cpumask_clear(this_rq->scx.cpus_to_wait); } void __init init_sched_ext_class(void) @@ -3127,7 +3149,7 @@ void __init init_sched_ext_class(void) * through the generated vmlinux.h. */ WRITE_ONCE(v, SCX_WAKE_EXEC | SCX_ENQ_WAKEUP | SCX_DEQ_SLEEP | - SCX_TG_ONLINE); + SCX_TG_ONLINE | SCX_KICK_PREEMPT); BUG_ON(rhashtable_init(&dsq_hash, &dsq_hash_params)); init_dsq(&scx_dsq_global, SCX_DSQ_GLOBAL); @@ -3135,6 +3157,12 @@ void __init init_sched_ext_class(void) BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL)); BUG_ON(!alloc_cpumask_var(&idle_masks.smt, GFP_KERNEL)); #endif + scx_kick_cpus_pnt_seqs = + __alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * + num_possible_cpus(), + __alignof__(scx_kick_cpus_pnt_seqs[0])); + BUG_ON(!scx_kick_cpus_pnt_seqs); + for_each_possible_cpu(cpu) { struct rq *rq = cpu_rq(cpu); @@ -3143,6 +3171,7 @@ void __init init_sched_ext_class(void) BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_kick, GFP_KERNEL)); BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_preempt, GFP_KERNEL)); + BUG_ON(!zalloc_cpumask_var(&rq->scx.cpus_to_wait, GFP_KERNEL)); init_irq_work(&rq->scx.kick_cpus_irq_work, kick_cpus_irq_workfn); } @@ -3397,6 +3426,8 @@ void scx_bpf_kick_cpu(s32 cpu, u64 flags) cpumask_set_cpu(cpu, rq->scx.cpus_to_kick); if (flags & SCX_KICK_PREEMPT) cpumask_set_cpu(cpu, rq->scx.cpus_to_preempt); + if (flags & SCX_KICK_WAIT) + cpumask_set_cpu(cpu, rq->scx.cpus_to_wait); irq_work_queue(&rq->scx.kick_cpus_irq_work); preempt_enable(); diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index 9a60b81d787e..39eb1b25ec99 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -66,6 +66,7 @@ enum scx_tg_flags { enum scx_kick_flags { SCX_KICK_PREEMPT = 1LLU << 0, /* force scheduling on the CPU */ + SCX_KICK_WAIT = 1LLU << 1, /* wait for the CPU to be rescheduled */ }; #ifdef CONFIG_SCHED_CLASS_EXT @@ -95,6 +96,22 @@ __printf(2, 3) void scx_ops_error_type(enum scx_exit_type type, #define scx_ops_error(fmt, args...) \ scx_ops_error_type(SCX_EXIT_ERROR, fmt, ##args) +static inline void scx_notify_pick_next_task(struct rq *rq, + const struct task_struct *p, + const struct sched_class *active) +{ +#ifdef CONFIG_SMP + if (!scx_enabled()) + return; + /* + * Pairs with the smp_load_acquire() issued by a CPU in + * kick_cpus_irq_workfn() who is waiting for this CPU to perform a + * resched. 
+ */ + smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1); +#endif +} + static inline void scx_notify_sched_tick(void) { unsigned long last_check; @@ -149,6 +166,9 @@ static inline int scx_check_setscheduler(struct task_struct *p, int policy) { return 0; } static inline bool scx_can_stop_tick(struct rq *rq) { return true; } static inline void init_sched_ext_class(void) {} +static inline void scx_notify_pick_next_task(struct rq *rq, + const struct task_struct *p, + const struct sched_class *active) {} static inline void scx_notify_sched_tick(void) {} #define for_each_active_class for_each_class diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index abb8ec22b6ef..d31185ecd090 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -708,6 +708,8 @@ struct scx_rq { u32 flags; cpumask_var_t cpus_to_kick; cpumask_var_t cpus_to_preempt; + cpumask_var_t cpus_to_wait; + u64 pnt_seq; struct irq_work kick_cpus_irq_work; }; #endif /* CONFIG_SCHED_CLASS_EXT */ From patchwork Sat Jan 28 00:16:34 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119580 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id AA2A4C38142 for ; Sat, 28 Jan 2023 00:20:06 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S233163AbjA1AUF (ORCPT ); Fri, 27 Jan 2023 19:20:05 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:36552 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S233032AbjA1ATI (ORCPT ); Fri, 27 Jan 2023 19:19:08 -0500 Received: from mail-pf1-x431.google.com (mail-pf1-x431.google.com [IPv6:2607:f8b0:4864:20::431]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 3BA7D1C31C; Fri, 27 Jan 2023 16:17:51 -0800 (PST) Received: by mail-pf1-x431.google.com with SMTP id cr11so2677179pfb.1; Fri, 27 Jan 2023 16:17:51 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=eqf3PC++b7JfcoHuY3h0riEmI/VRFmHWCWexWy07PQc=; b=eefJEFJ3NxysSfw35AciiLNaTsbRwXsX5hh0jbO5Cto7+R3ppdPGzrg8qZwQESNfvu LAIj7/vuUb9ZXXF4CVQYvtspgTkTmxeUHxTM1ryg7CE/94XVF7d7kWk2qxFhnG1PH3Wh XYWt/2ksGddbVk9L94usbn2CbZryhdvSO2aFlcte4/XLT6vR6HEOErvA5bRXGtP/iOJu zWgf5NnN/tx20H10Py5LLfjIxZXwwvZklObbhQJimuyiyU0A0zEtGhGhjY+CHujqsmNe Xu4wpwOiPrdt0BjV54hdKUdx2F2Am0daMgoK+Qv+aVnxpl3m0xxFJKIfBUMS7OFPle4B VHVQ== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=eqf3PC++b7JfcoHuY3h0riEmI/VRFmHWCWexWy07PQc=; b=F+rQFnEemdyzG3QsZ8iEOUophTf733QgFlkkrbOqnVn1TzLfXBLQfdSFRGm0S2FNCa b1/K75yC5ALG3I/J5O7jvbW6VtYXuUqDgC3mqr1x2avOgo/IazcX0LwfSljA6ofq245b OBQ/peMDI0g8osAsqiuXpGWIzceVMBVJX6g/0k6lJfy9rLJ0SLgkypYtDmcRlTJIkNzh PNnynQiXFZze9lkjVvuQ/pGWLkeMHv02P5Smks4zDuUhsN3MYyDGXE77nyy6vFaPV5uy v18cjSeMCp2zusQxbqoZdX8eJ6nRBs4b3GQsXjuq2OPMebuUFwDrUJ0Rc6VmnuwH5+o0 WKhg== X-Gm-Message-State: AO0yUKUdVAwCW6RUH2NrE6VkPGPiakJAhhJMmlYnKrz8hgon/c9nGIZe 4vX9IHIli6rkhpWkYGquNGE= X-Google-Smtp-Source: 
AK7set/38dpFxg5+sTGz0tchk+FoU3Xn0StKkhNAmZvRhrQhY9RQLm0U2bVU71lhAWWK/JXsxIwFKg== X-Received: by 2002:a05:6a00:180c:b0:592:ef03:6777 with SMTP id y12-20020a056a00180c00b00592ef036777mr3560331pfa.3.1674865058722; Fri, 27 Jan 2023 16:17:38 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id w71-20020a62824a000000b005905d2fe760sm3149821pfd.155.2023.01.27.16.17.38 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:38 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 25/30] sched_ext: Implement sched_ext_ops.cpu_acquire/release() Date: Fri, 27 Jan 2023 14:16:34 -1000 Message-Id: <20230128001639.3510083-26-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org From: David Vernet Scheduler classes are strictly ordered and when a higher priority class has tasks to run, the lower priority ones lose access to the CPU. Being able to monitor and act on these events are necessary for use cases includling strict core-scheduling and latency management. This patch adds two operations ops.cpu_acquire() and .cpu_release(). The former is invoked when a CPU becomes available to the BPF scheduler and the opposite for the latter. This patch also implements scx_bpf_reenqueue_local() which can be called from .cpu_release() to trigger requeueing of all tasks in the local dsq of the CPU so that the tasks can be reassigned to other available CPUs. scx_example_pair is updated to use .cpu_acquire/release() along with %SCX_KICK_WAIT to make the pair scheduling guarantee strict even when a CPU is preempted by a higher priority scheduler class. scx_example_qmap is updated to use .cpu_acquire/release() to empty the local dsq of a preempted CPU. A similar approach can be adopted by BPF schedulers that want to have a tight control over latency. v2: Add p->scx.kf_mask annotation to allow calling scx_bpf_reenqueue_local() from ops.cpu_release() nested inside ops.init() and other sleepable operations. 
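For illustration, here is a condensed sketch of how a BPF scheduler is expected to wire these pieces together (scx_example_qmap below does essentially this; the sched_* names and the nr_reenqueued stat are placeholders):

u64 nr_reenqueued;

void BPF_STRUCT_OPS(sched_cpu_release, s32 cpu, struct scx_cpu_release_args *args)
{
	u32 cnt;

	/*
	 * @cpu was taken away by a higher priority sched_class. Pull the
	 * tasks sitting on its local dsq and hand them back to ops.enqueue()
	 * with SCX_ENQ_REENQ set so they can be placed elsewhere.
	 */
	cnt = scx_bpf_reenqueue_local();
	if (cnt)
		__sync_fetch_and_add(&nr_reenqueued, cnt);
}

void BPF_STRUCT_OPS(sched_enqueue, struct task_struct *p, u64 enq_flags)
{
	if (enq_flags & SCX_ENQ_REENQ) {
		s32 cpu;

		/* park on the global dsq and kick an idle CPU to pick it up */
		scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, 0, enq_flags);
		cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr);
		if (cpu >= 0)
			scx_bpf_kick_cpu(cpu, 0);
		return;
	}

	/* ... regular enqueue path ... */
}

Both callbacks are hooked up through the corresponding .enqueue and .cpu_release members of struct sched_ext_ops.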
Signed-off-by: David Vernet Reviewed-by: Tejun Heo Signed-off-by: Tejun Heo Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- include/linux/sched/ext.h | 52 +++++++++ kernel/sched/ext.c | 139 ++++++++++++++++++++++++- kernel/sched/ext.h | 22 +++- kernel/sched/sched.h | 1 + tools/sched_ext/scx_common.bpf.h | 1 + tools/sched_ext/scx_example_pair.bpf.c | 101 +++++++++++++++++- tools/sched_ext/scx_example_qmap.bpf.c | 37 ++++++- tools/sched_ext/scx_example_qmap.c | 4 +- 8 files changed, 346 insertions(+), 11 deletions(-) diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 11d6902e717d..82ead36d1136 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -134,6 +134,32 @@ struct scx_cgroup_init_args { u32 weight; }; +enum scx_cpu_preempt_reason { + /* next task is being scheduled by &sched_class_rt */ + SCX_CPU_PREEMPT_RT, + /* next task is being scheduled by &sched_class_dl */ + SCX_CPU_PREEMPT_DL, + /* next task is being scheduled by &sched_class_stop */ + SCX_CPU_PREEMPT_STOP, + /* unknown reason for SCX being preempted */ + SCX_CPU_PREEMPT_UNKNOWN, +}; + +/* + * Argument container for ops->cpu_acquire(). Currently empty, but may be + * expanded in the future. + */ +struct scx_cpu_acquire_args {}; + +/* argument container for ops->cpu_release() */ +struct scx_cpu_release_args { + /* the reason the CPU was preempted */ + enum scx_cpu_preempt_reason reason; + + /* the task that's going to be scheduled on the CPU */ + const struct task_struct *task; +}; + /** * struct sched_ext_ops - Operation table for BPF scheduler implementation * @@ -320,6 +346,28 @@ struct sched_ext_ops { */ void (*update_idle)(s32 cpu, bool idle); + /** + * cpu_acquire - A CPU is becoming available to the BPF scheduler + * @cpu: The CPU being acquired by the BPF scheduler. + * @args: Acquire arguments, see the struct definition. + * + * A CPU that was previously released from the BPF scheduler is now once + * again under its control. + */ + void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args); + + /** + * cpu_release - A CPU is taken away from the BPF scheduler + * @cpu: The CPU being released by the BPF scheduler. + * @args: Release arguments, see the struct definition. + * + * The specified CPU is no longer under the control of the BPF + * scheduler. This could be because it was preempted by a higher + * priority sched_class, though there may be other reasons as well. The + * caller should consult @args->reason to determine the cause. 
+ */ + void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args); + /** * prep_enable - Prepare to enable BPF scheduling for a task * @p: task to prepare BPF scheduling for @@ -522,8 +570,12 @@ enum scx_kf_mask { SCX_KF_INIT = 1 << 0, /* allowed from ops.init() */ SCX_KF_SLEEPABLE = 1 << 1, /* from sleepable init operations */ + /* ENQUEUE_DISPATCH may be nested inside CPU_RELEASE */ + SCX_KF_CPU_RELEASE = 1 << 2, /* from ops.cpu_release() */ + SCX_KF_ENQUEUE_DISPATCH = 1 << 3, /* from ops.enqueue() or .dispatch() */ SCX_KF_DISPATCH = 1 << 4, /* from ops.dispatch() */ + __SCX_KF_TERMINAL = SCX_KF_ENQUEUE_DISPATCH | SCX_KF_DISPATCH, }; /* diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index d1de6a44c4f5..072082968f0f 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -83,6 +83,7 @@ static bool scx_warned_zero_slice; static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_last); static DEFINE_STATIC_KEY_FALSE(scx_ops_enq_exiting); +DEFINE_STATIC_KEY_FALSE(scx_ops_cpu_preempt); static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled); struct static_key_false scx_has_op[SCX_NR_ONLINE_OPS] = @@ -193,9 +194,14 @@ static void scx_kf_allow(u32 mask) { u32 allowed_nesters = 0; - /* INIT|SLEEPABLE can nest others but not themselves */ + /* + * INIT|SLEEPABLE can nest others but not themselves. CPU_RELEASE can + * additionally nest ENQUEUE_DISPATCH. + */ if (!(mask & (SCX_KF_INIT | SCX_KF_SLEEPABLE))) allowed_nesters |= SCX_KF_INIT | SCX_KF_SLEEPABLE; + if (mask & SCX_KF_ENQUEUE_DISPATCH) + allowed_nesters |= SCX_KF_CPU_RELEASE; WARN_ONCE(current->scx.kf_mask & ~allowed_nesters, "invalid nesting current->scx.kf_mask=0x%x mask=0x%x allowed_nesters=0x%x\n", @@ -238,6 +244,12 @@ static bool scx_kf_allowed(u32 mask) return false; } + if (unlikely((mask & SCX_KF_CPU_RELEASE) && + (current->scx.kf_mask & __SCX_KF_TERMINAL))) { + scx_ops_error("cpu_release kfunc called from terminal operations"); + return false; + } + return true; } @@ -1276,6 +1288,19 @@ static int balance_scx(struct rq *rq, struct task_struct *prev, lockdep_assert_rq_held(rq); + if (static_branch_unlikely(&scx_ops_cpu_preempt) && + unlikely(rq->scx.cpu_released)) { + /* + * If the previous sched_class for the current CPU was not SCX, + * notify the BPF scheduler that it again has control of the + * core. This callback complements ->cpu_release(), which is + * emitted in scx_notify_pick_next_task(). + */ + if (SCX_HAS_OP(cpu_acquire)) + scx_ops.cpu_acquire(cpu_of(rq), NULL); + rq->scx.cpu_released = false; + } + if (prev_on_scx) { WARN_ON_ONCE(prev->scx.flags & SCX_TASK_BAL_KEEP); update_curr_scx(rq); @@ -1283,7 +1308,9 @@ static int balance_scx(struct rq *rq, struct task_struct *prev, /* * If @prev is runnable & has slice left, it has priority and * fetching more just increases latency for the fetched tasks. - * Tell put_prev_task_scx() to put @prev on local_dsq. + * Tell put_prev_task_scx() to put @prev on local_dsq. If the + * BPF scheduler wants to handle this explicitly, it should + * implement ->cpu_released(). * * See scx_ops_disable_workfn() for the explanation on the * disabling() test. 
@@ -1489,6 +1516,59 @@ static struct task_struct *pick_next_task_scx(struct rq *rq) return p; } +static enum scx_cpu_preempt_reason +preempt_reason_from_class(const struct sched_class *class) +{ +#ifdef CONFIG_SMP + if (class == &stop_sched_class) + return SCX_CPU_PREEMPT_STOP; +#endif + if (class == &dl_sched_class) + return SCX_CPU_PREEMPT_DL; + if (class == &rt_sched_class) + return SCX_CPU_PREEMPT_RT; + return SCX_CPU_PREEMPT_UNKNOWN; +} + +void __scx_notify_pick_next_task(struct rq *rq, + const struct task_struct *task, + const struct sched_class *active) +{ + lockdep_assert_rq_held(rq); + + /* + * The callback is conceptually meant to convey that the CPU is no + * longer under the control of SCX. Therefore, don't invoke the + * callback if the CPU is is staying on SCX, or going idle (in which + * case the SCX scheduler has actively decided not to schedule any + * tasks on the CPU). + */ + if (likely(active >= &ext_sched_class)) + return; + + /* + * At this point we know that SCX was preempted by a higher priority + * sched_class, so invoke the ->cpu_release() callback if we have not + * done so already. We only send the callback once between SCX being + * preempted, and it regaining control of the CPU. + * + * ->cpu_release() complements ->cpu_acquire(), which is emitted the + * next time that balance_scx() is invoked. + */ + if (!rq->scx.cpu_released) { + if (SCX_HAS_OP(cpu_release)) { + struct scx_cpu_release_args args = { + .reason = preempt_reason_from_class(active), + .task = task, + }; + + SCX_CALL_OP(SCX_KF_CPU_RELEASE, + cpu_release, cpu_of(rq), &args); + } + rq->scx.cpu_released = true; + } +} + #ifdef CONFIG_SMP static bool test_and_clear_cpu_idle(int cpu) @@ -2537,6 +2617,7 @@ static void scx_ops_disable_workfn(struct kthread_work *work) static_branch_disable_cpuslocked(&scx_has_op[i]); static_branch_disable_cpuslocked(&scx_ops_enq_last); static_branch_disable_cpuslocked(&scx_ops_enq_exiting); + static_branch_disable_cpuslocked(&scx_ops_cpu_preempt); static_branch_disable_cpuslocked(&scx_builtin_idle_enabled); synchronize_rcu(); @@ -2743,6 +2824,8 @@ static int scx_ops_enable(struct sched_ext_ops *ops) if (ops->flags & SCX_OPS_ENQ_EXITING) static_branch_enable_cpuslocked(&scx_ops_enq_exiting); + if (scx_ops.cpu_acquire || scx_ops.cpu_release) + static_branch_enable_cpuslocked(&scx_ops_cpu_preempt); if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) { reset_idle_masks(); @@ -3396,6 +3479,56 @@ static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = { .set = &scx_kfunc_ids_dispatch, }; +/** + * scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ + * + * Iterate over all of the tasks currently enqueued on the local DSQ of the + * caller's CPU, and re-enqueue them in the BPF scheduler. Returns the number of + * processed tasks. Can only be called from ops.cpu_release(). + */ +u32 scx_bpf_reenqueue_local(void) +{ + u32 nr_enqueued, i; + struct rq *rq; + struct scx_rq *scx_rq; + + if (!scx_kf_allowed(SCX_KF_CPU_RELEASE)) + return 0; + + rq = cpu_rq(smp_processor_id()); + lockdep_assert_rq_held(rq); + scx_rq = &rq->scx; + + /* + * Get the number of tasks on the local DSQ before iterating over it to + * pull off tasks. The enqueue callback below can signal that it wants + * the task to stay on the local DSQ, and we want to prevent the BPF + * scheduler from causing us to loop indefinitely. 
+ */ + nr_enqueued = scx_rq->local_dsq.nr; + for (i = 0; i < nr_enqueued; i++) { + struct task_struct *p; + + p = first_local_task(rq); + WARN_ON_ONCE(atomic64_read(&p->scx.ops_state) != SCX_OPSS_NONE); + WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED)); + WARN_ON_ONCE(p->scx.holding_cpu != -1); + dispatch_dequeue(scx_rq, p); + do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1); + } + + return nr_enqueued; +} + +BTF_SET8_START(scx_kfunc_ids_cpu_release) +BTF_ID_FLAGS(func, scx_bpf_reenqueue_local) +BTF_SET8_END(scx_kfunc_ids_cpu_release) + +static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = { + .owner = THIS_MODULE, + .set = &scx_kfunc_ids_cpu_release, +}; + /** * scx_bpf_kick_cpu - Trigger reschedule on a CPU * @cpu: cpu to kick @@ -3698,6 +3831,8 @@ static int __init register_ext_kfuncs(void) &scx_kfunc_set_enqueue_dispatch)) || (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_dispatch)) || + (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, + &scx_kfunc_set_cpu_release)) || (ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &scx_kfunc_set_any))) { pr_err("sched_ext: failed to register kfunc sets (%d)\n", ret); diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index 39eb1b25ec99..099e17e92228 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -27,6 +27,17 @@ enum scx_enq_flags { */ SCX_ENQ_PREEMPT = 1LLU << 32, + /* + * The task being enqueued was previously enqueued on the current CPU's + * %SCX_DSQ_LOCAL, but was removed from it in a call to the + * bpf_scx_reenqueue_local() kfunc. If bpf_scx_reenqueue_local() was + * invoked in a ->cpu_release() callback, and the task is again + * dispatched back to %SCX_LOCAL_DSQ by this current ->enqueue(), the + * task will not be scheduled on the CPU until at least the next invocation + * of the ->cpu_acquire() callback. + */ + SCX_ENQ_REENQ = 1LLU << 40, + /* * The task being enqueued is the only task available for the cpu. By * default, ext core keeps executing such tasks but when @@ -82,6 +93,8 @@ DECLARE_STATIC_KEY_FALSE(__scx_switched_all); #define scx_enabled() static_branch_unlikely(&__scx_ops_enabled) #define scx_switched_all() static_branch_unlikely(&__scx_switched_all) +DECLARE_STATIC_KEY_FALSE(scx_ops_cpu_preempt); + bool task_on_scx(struct task_struct *p); void scx_pre_fork(struct task_struct *p); int scx_fork(struct task_struct *p); @@ -96,13 +109,17 @@ __printf(2, 3) void scx_ops_error_type(enum scx_exit_type type, #define scx_ops_error(fmt, args...) 
\ scx_ops_error_type(SCX_EXIT_ERROR, fmt, ##args) +void __scx_notify_pick_next_task(struct rq *rq, + const struct task_struct *p, + const struct sched_class *active); + static inline void scx_notify_pick_next_task(struct rq *rq, const struct task_struct *p, const struct sched_class *active) { -#ifdef CONFIG_SMP if (!scx_enabled()) return; +#ifdef CONFIG_SMP /* * Pairs with the smp_load_acquire() issued by a CPU in * kick_cpus_irq_workfn() who is waiting for this CPU to perform a @@ -110,6 +127,9 @@ static inline void scx_notify_pick_next_task(struct rq *rq, */ smp_store_release(&rq->scx.pnt_seq, rq->scx.pnt_seq + 1); #endif + if (!static_branch_unlikely(&scx_ops_cpu_preempt)) + return; + __scx_notify_pick_next_task(rq, p, active); } static inline void scx_notify_sched_tick(void) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index d31185ecd090..578b88f1dfac 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -706,6 +706,7 @@ struct scx_rq { u64 ops_qseq; u32 nr_running; u32 flags; + bool cpu_released; cpumask_var_t cpus_to_kick; cpumask_var_t cpus_to_preempt; cpumask_var_t cpus_to_wait; diff --git a/tools/sched_ext/scx_common.bpf.h b/tools/sched_ext/scx_common.bpf.h index ff32e4dd30a6..7d01db7d5e9f 100644 --- a/tools/sched_ext/scx_common.bpf.h +++ b/tools/sched_ext/scx_common.bpf.h @@ -81,6 +81,7 @@ void scx_bpf_put_idle_cpumask(const struct cpumask *cpumask) __ksym; void scx_bpf_destroy_dsq(u64 dsq_id) __ksym; bool scx_bpf_task_running(const struct task_struct *p) __ksym; s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym; +u32 scx_bpf_reenqueue_local(void) __ksym; #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_EXITING 0x00000004 diff --git a/tools/sched_ext/scx_example_pair.bpf.c b/tools/sched_ext/scx_example_pair.bpf.c index 8e277225b044..6d9f5b97cbeb 100644 --- a/tools/sched_ext/scx_example_pair.bpf.c +++ b/tools/sched_ext/scx_example_pair.bpf.c @@ -85,6 +85,28 @@ * be resolved in the near future which should allow greatly simplifying this * scheduler. * + * Dealing with preemption + * ----------------------- + * + * SCX is the lowest priority sched_class, and could be preempted by them at + * any time. To address this, the scheduler implements pair_cpu_release() and + * pair_cpu_acquire() callbacks which are invoked by the core scheduler when + * the scheduler loses and gains control of the CPU respectively. + * + * In pair_cpu_release(), we mark the pair_ctx as having been preempted, and + * then invoke: + * + * scx_bpf_kick_cpu(pair_cpu, SCX_KICK_PREEMPT | SCX_KICK_WAIT); + * + * This preempts the pair CPU, and waits until it has re-entered the scheduler + * before returning. This is necessary to ensure that the higher priority + * sched_class that preempted our scheduler does not schedule a task + * concurrently with our pair CPU. + * + * When the CPU is re-acquired in pair_cpu_acquire(), we unmark the preemption + * in the pair_ctx, and send another resched IPI to the pair CPU to re-enable + * pair scheduling. + * * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. * Copyright (c) 2022 Tejun Heo * Copyright (c) 2022 David Vernet @@ -124,6 +146,12 @@ struct pair_ctx { /* the CPUs that are currently active on the cgroup */ u32 active_mask; + + /* + * the CPUs that are currently preempted and running tasks in a + * different scheduler. 
+ */ + u32 preempted_mask; }; struct { @@ -340,7 +368,7 @@ static int dispatch_loopfn(u32 idx, void *data) struct task_struct *p; u64 now = bpf_ktime_get_ns(); bool kick_pair = false; - bool expired; + bool expired, pair_preempted; u32 *vptr, in_pair_mask; s32 pid; u64 cgid; @@ -369,10 +397,14 @@ static int dispatch_loopfn(u32 idx, void *data) */ pairc->draining = true; - if (pairc->active_mask) { + pair_preempted = pairc->preempted_mask; + if (pairc->active_mask || pair_preempted) { /* - * The other CPU is still active We want to wait until - * this cgroup expires. + * The other CPU is still active, or is no longer under + * our control due to e.g. being preempted by a higher + * priority sched_class. We want to wait until this + * cgroup expires, or until control of our pair CPU has + * been returned to us. * * If the pair controls its CPU, and the time already * expired, kick. When the other CPU arrives at @@ -381,7 +413,7 @@ static int dispatch_loopfn(u32 idx, void *data) */ __sync_fetch_and_add(&nr_exp_waits, 1); bpf_spin_unlock(&pairc->lock); - if (expired) + if (expired && !pair_preempted) kick_pair = true; goto out_maybe_kick; } @@ -486,6 +518,63 @@ void BPF_STRUCT_OPS(pair_dispatch, s32 cpu, struct task_struct *prev) bpf_loop(1 << 23, dispatch_loopfn, &cpu_on_stack, 0); } +void BPF_STRUCT_OPS(pair_cpu_acquire, s32 cpu, struct scx_cpu_acquire_args *args) +{ + int ret; + u32 in_pair_mask; + struct pair_ctx *pairc; + bool kick_pair; + + ret = lookup_pairc_and_mask(cpu, &pairc, &in_pair_mask); + if (ret) + return; + + bpf_spin_lock(&pairc->lock); + pairc->preempted_mask &= ~in_pair_mask; + /* Kick the pair CPU, unless it was also preempted. */ + kick_pair = !pairc->preempted_mask; + bpf_spin_unlock(&pairc->lock); + + if (kick_pair) { + s32 *pair = (s32 *)MEMBER_VPTR(pair_cpu, [cpu]); + + if (pair) { + __sync_fetch_and_add(&nr_kicks, 1); + scx_bpf_kick_cpu(*pair, SCX_KICK_PREEMPT); + } + } +} + +void BPF_STRUCT_OPS(pair_cpu_release, s32 cpu, struct scx_cpu_release_args *args) +{ + int ret; + u32 in_pair_mask; + struct pair_ctx *pairc; + bool kick_pair; + + ret = lookup_pairc_and_mask(cpu, &pairc, &in_pair_mask); + if (ret) + return; + + bpf_spin_lock(&pairc->lock); + pairc->preempted_mask |= in_pair_mask; + pairc->active_mask &= ~in_pair_mask; + /* Kick the pair CPU if it's still running. */ + kick_pair = pairc->active_mask; + pairc->draining = true; + bpf_spin_unlock(&pairc->lock); + + if (kick_pair) { + s32 *pair = (s32 *)MEMBER_VPTR(pair_cpu, [cpu]); + + if (pair) { + __sync_fetch_and_add(&nr_kicks, 1); + scx_bpf_kick_cpu(*pair, SCX_KICK_PREEMPT | SCX_KICK_WAIT); + } + } + __sync_fetch_and_add(&nr_preemptions, 1); +} + static int alloc_cgrp_q_idx_loopfn(u32 idx, void *data) { u32 q_idx; @@ -547,6 +636,8 @@ SEC(".struct_ops") struct sched_ext_ops pair_ops = { .enqueue = (void *)pair_enqueue, .dispatch = (void *)pair_dispatch, + .cpu_acquire = (void *)pair_cpu_acquire, + .cpu_release = (void *)pair_cpu_release, .cgroup_init = (void *)pair_cgroup_init, .cgroup_exit = (void *)pair_cgroup_exit, .init = (void *)pair_init, diff --git a/tools/sched_ext/scx_example_qmap.bpf.c b/tools/sched_ext/scx_example_qmap.bpf.c index e968a9b341a4..7e670986542b 100644 --- a/tools/sched_ext/scx_example_qmap.bpf.c +++ b/tools/sched_ext/scx_example_qmap.bpf.c @@ -11,6 +11,8 @@ * * - BPF-side queueing using PIDs. * - Sleepable per-task storage allocation using ops.prep_enable(). + * - Using ops.cpu_release() to handle a higher priority scheduling class taking + * the CPU away. 
* * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. * Copyright (c) 2022 Tejun Heo @@ -78,7 +80,7 @@ struct { } dispatch_idx_cnt SEC(".maps"); /* Statistics */ -unsigned long nr_enqueued, nr_dispatched, nr_dequeued; +unsigned long nr_enqueued, nr_dispatched, nr_reenqueued, nr_dequeued; s32 BPF_STRUCT_OPS(qmap_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags) @@ -152,6 +154,22 @@ void BPF_STRUCT_OPS(qmap_enqueue, struct task_struct *p, u64 enq_flags) return; } + /* + * If the task was re-enqueued due to the CPU being preempted by a + * higher priority scheduling class, just re-enqueue the task directly + * on the global DSQ. As we want another CPU to pick it up, find and + * kick an idle CPU. + */ + if (enq_flags & SCX_ENQ_REENQ) { + s32 cpu; + + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, 0, enq_flags); + cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr); + if (cpu >= 0) + scx_bpf_kick_cpu(cpu, 0); + return; + } + ring = bpf_map_lookup_elem(&queue_arr, &idx); if (!ring) { scx_bpf_error("failed to find ring %d", idx); @@ -237,6 +255,22 @@ void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev) } } +void BPF_STRUCT_OPS(qmap_cpu_release, s32 cpu, struct scx_cpu_release_args *args) +{ + u32 cnt; + + /* + * Called when @cpu is taken by a higher priority scheduling class. This + * makes @cpu no longer available for executing sched_ext tasks. As we + * don't want the tasks in @cpu's local dsq to sit there until @cpu + * becomes available again, re-enqueue them into the global dsq. See + * %SCX_ENQ_REENQ handling in qmap_enqueue(). + */ + cnt = scx_bpf_reenqueue_local(); + if (cnt) + __sync_fetch_and_add(&nr_reenqueued, cnt); +} + s32 BPF_STRUCT_OPS(qmap_prep_enable, struct task_struct *p, struct scx_enable_args *args) { @@ -272,6 +306,7 @@ struct sched_ext_ops qmap_ops = { .enqueue = (void *)qmap_enqueue, .dequeue = (void *)qmap_dequeue, .dispatch = (void *)qmap_dispatch, + .cpu_release = (void *)qmap_cpu_release, .prep_enable = (void *)qmap_prep_enable, .init = (void *)qmap_init, .exit = (void *)qmap_exit, diff --git a/tools/sched_ext/scx_example_qmap.c b/tools/sched_ext/scx_example_qmap.c index 820fe50bf43c..de6f03ccb233 100644 --- a/tools/sched_ext/scx_example_qmap.c +++ b/tools/sched_ext/scx_example_qmap.c @@ -91,9 +91,9 @@ int main(int argc, char **argv) long nr_enqueued = skel->bss->nr_enqueued; long nr_dispatched = skel->bss->nr_dispatched; - printf("enq=%lu, dsp=%lu, delta=%ld, deq=%lu\n", + printf("enq=%lu, dsp=%lu, delta=%ld, reenq=%lu, deq=%lu\n", nr_enqueued, nr_dispatched, nr_enqueued - nr_dispatched, - skel->bss->nr_dequeued); + skel->bss->nr_reenqueued, skel->bss->nr_dequeued); fflush(stdout); sleep(1); } From patchwork Sat Jan 28 00:16:35 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119581 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 39E08C61DA7 for ; Sat, 28 Jan 2023 00:20:09 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S233469AbjA1AUH (ORCPT ); Fri, 27 Jan 2023 19:20:07 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:37572 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232904AbjA1ATK (ORCPT ); Fri, 27 Jan 2023 19:19:10 -0500 Received: from mail-pf1-x435.google.com 
(mail-pf1-x435.google.com [IPv6:2607:f8b0:4864:20::435]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id D4E6020D31; Fri, 27 Jan 2023 16:17:57 -0800 (PST) Received: by mail-pf1-x435.google.com with SMTP id u5so4032839pfm.10; Fri, 27 Jan 2023 16:17:57 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=q8dhPlwDkUf9/cVdreQsI02YoQT+NTF288fEP0czgDg=; b=TEdOLq/I1+MrAVG6CHqzZzek0q8+M8rJTdF7Sc7jRNZ5yzNPo8SP94e77wAO7XgQuC eZNsSRiZDQW1klMaAu/h8U6oXbWv3okaBEQoDGaZjUmhHol/MM5CDFj/qrSA0gsXBeBG kYqq829YNcy55wkrHNDklGNsEE8/IhYgAYdIIQfAUNcC7qhvbL9jjuWKzOS/xxv8qur4 //xN9ie7fuly+nqM+l1nTvy11prnlNYu9NThokxGX+L3mGdaoaXg9kIKD3MZwvzpk9XA PERs+C1bKFW7+ajvwc79U/gaxqeViNRcUPdGdnxKKHV3i68k/IlGkeifH2e2x31iF+vN AcUA== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=q8dhPlwDkUf9/cVdreQsI02YoQT+NTF288fEP0czgDg=; b=ueseWKANu3YjGBt2uW16FLhtLJFdfMxuQUHCaj+5e16+ehepAWUq7Cjw09ZXrr1lmy vKA+oFU8IQep7TbMbE3/YnCVCJ/Yvh14YPk4HuYR+VJxHli018JDcqBSMMVakk5uJ1Hx 96ORUNGLlqYHQ4zO7eINT/PVfH1qZha9rnlTbvc3ab6F5zTvQgKz8/NiId56/I3PrL+C OEnQmxvF2TV+N3ONDKqzkWPLtTmpEz2kmOHf9H87hWUh+sMFm9OpQV6CEh7bV+H4h6XU A8ZhSslipNmtJbMIDoDFBaEyBAC1yebCCmz3utBmARe82gdseNAOI3Y0t/e1FuQQQhd4 99aw== X-Gm-Message-State: AO0yUKUbbF9gw4QoeTrAr+FOMv8mI+uvUk1PSdd+mAalb9RS9Gzmr16N aDER4jmn3RyQrY8MohBaRjw= X-Google-Smtp-Source: AK7set/hImDu1ceYY/CLBF2S6sibkELRQHuTEFfkzRzrjuk+xMH7BE4lJkXeuGvFe3dh6RMMy2s/2w== X-Received: by 2002:a05:6a00:1707:b0:593:4815:b2ec with SMTP id h7-20020a056a00170700b005934815b2ecmr2944144pfc.8.1674865060487; Fri, 27 Jan 2023 16:17:40 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. [2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id c10-20020aa78c0a000000b00582bdaab584sm3202452pfd.81.2023.01.27.16.17.39 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:40 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 26/30] sched_ext: Implement sched_ext_ops.cpu_online/offline() Date: Fri, 27 Jan 2023 14:16:35 -1000 Message-Id: <20230128001639.3510083-27-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org Add ops.cpu_online/offline() which are invoked when CPUs come online and offline respectively. 
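As a minimal sketch of the two callbacks (the sched_* names and the stat counters are illustrative only, not part of this patch):

u64 nr_cpu_onlines, nr_cpu_offlines;

void BPF_STRUCT_OPS(sched_cpu_online, s32 cpu)
{
	/* @cpu may start receiving enqueues and running our tasks */
	__sync_fetch_and_add(&nr_cpu_onlines, 1);
}

void BPF_STRUCT_OPS(sched_cpu_offline, s32 cpu)
{
	/* @cpu won't run tasks for this scheduler until it comes back online */
	__sync_fetch_and_add(&nr_cpu_offlines, 1);
}

They are registered through the .cpu_online and .cpu_offline members of struct sched_ext_ops.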
As the enqueue path already automatically bypasses tasks to the local dsq on a deactivated CPU, BPF schedulers are guaranteed to see tasks only on CPUs which are between online() and offline(). Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden --- include/linux/sched/ext.h | 18 ++++++++++++++++++ kernel/sched/ext.c | 15 +++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 82ead36d1136..01c846445243 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -368,6 +368,24 @@ struct sched_ext_ops { */ void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args); + /** + * cpu_online - A CPU became online + * @cpu: CPU which just came up + * + * @cpu just came online. @cpu doesn't call ops.enqueue() or run tasks + * associated with other CPUs beforehand. + */ + void (*cpu_online)(s32 cpu); + + /** + * cpu_offline - A CPU is going offline + * @cpu: CPU which is going offline + * + * @cpu is going offline. @cpu doesn't call ops.enqueue() or run tasks + * associated with other CPUs afterwards. + */ + void (*cpu_offline)(s32 cpu); + /** * prep_enable - Prepare to enable BPF scheduling for a task * @p: task to prepare BPF scheduling for diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 072082968f0f..e981b7111e0a 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -1727,6 +1727,18 @@ void __scx_update_idle(struct rq *rq, bool idle) } } +static void rq_online_scx(struct rq *rq, enum rq_onoff_reason reason) +{ + if (SCX_HAS_OP(cpu_online) && reason == RQ_ONOFF_HOTPLUG) + scx_ops.cpu_online(cpu_of(rq)); +} + +static void rq_offline_scx(struct rq *rq, enum rq_onoff_reason reason) +{ + if (SCX_HAS_OP(cpu_offline) && reason == RQ_ONOFF_HOTPLUG) + scx_ops.cpu_offline(cpu_of(rq)); +} + #else /* !CONFIG_SMP */ static bool test_and_clear_cpu_idle(int cpu) { return false; } @@ -2215,6 +2227,9 @@ DEFINE_SCHED_CLASS(ext) = { .balance = balance_scx, .select_task_rq = select_task_rq_scx, .set_cpus_allowed = set_cpus_allowed_scx, + + .rq_online = rq_online_scx, + .rq_offline = rq_offline_scx, #endif .task_tick = task_tick_scx, From patchwork Sat Jan 28 00:16:36 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119582 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 1B427C61DB3 for ; Sat, 28 Jan 2023 00:20:12 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S233075AbjA1AUJ (ORCPT ); Fri, 27 Jan 2023 19:20:09 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:36812 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232923AbjA1ATM (ORCPT ); Fri, 27 Jan 2023 19:19:12 -0500 Received: from mail-pj1-x102f.google.com (mail-pj1-x102f.google.com [IPv6:2607:f8b0:4864:20::102f]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 4E6D278AFE; Fri, 27 Jan 2023 16:18:02 -0800 (PST) Received: by mail-pj1-x102f.google.com with SMTP id lp10so6129778pjb.4; Fri, 27 Jan 2023 16:18:02 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date 
From: Tejun Heo
To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com
Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo
Subject: [PATCH 27/30] sched_ext: Implement core-sched support
Date: Fri, 27 Jan 2023 14:16:36 -1000
Message-Id: <20230128001639.3510083-28-tj@kernel.org>
In-Reply-To: <20230128001639.3510083-1-tj@kernel.org>
References: <20230128001639.3510083-1-tj@kernel.org>

The core-sched support is composed of the following parts:

* task_struct->scx.core_sched_at is added. This is a timestamp which can be used to order tasks. Depending on whether the BPF scheduler implements custom ordering, it tracks either global FIFO ordering of all tasks or local-DSQ ordering within the dispatched tasks on a CPU.

* prio_less() is updated to call scx_prio_less() when comparing SCX tasks. scx_prio_less() calls ops.core_sched_before() if available or uses the core_sched_at timestamp. For global FIFO ordering, the BPF scheduler doesn't need to do anything. Otherwise, it should implement ops.core_sched_before() which reflects the ordering.
* When core-sched is enabled, balance_scx() balances all SMT siblings so that they all have tasks dispatched if necessary before pick_task_scx() is called. pick_task_scx() picks between the current task and the first dispatched task on the local DSQ based on availability and the core_sched_at timestamps. Note that FIFO ordering is expected among the already dispatched tasks whether running or on the local DSQ, so this path always compares core_sched_at instead of calling into ops.core_sched_before(). qmap_core_sched_before() is added to scx_example_qmap. It scales the distances from the heads of the queues to compare the tasks across different priority queues and seems to behave as expected. Signed-off-by: Tejun Heo Reviewed-by: David Vernet Reviewed-by: Josh Don --- include/linux/sched/ext.h | 21 +++ kernel/Kconfig.preempt | 2 +- kernel/sched/core.c | 12 +- kernel/sched/ext.c | 196 +++++++++++++++++++++++-- kernel/sched/ext.h | 12 ++ tools/sched_ext/scx_example_qmap.bpf.c | 87 ++++++++++- tools/sched_ext/scx_example_qmap.c | 5 +- 7 files changed, 319 insertions(+), 16 deletions(-) diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 01c846445243..d3c2701bb4b4 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -315,6 +315,24 @@ struct sched_ext_ops { */ bool (*yield)(struct task_struct *from, struct task_struct *to); + /** + * core_sched_before - Task ordering for core-sched + * @a: task A + * @b: task B + * + * Used by core-sched to determine the ordering between two tasks. See + * Documentation/admin-guide/hw-vuln/core-scheduling.rst for details on + * core-sched. + * + * Both @a and @b are runnable and may or may not currently be queued on + * the BPF scheduler. Should return %true if @a should run before @b. + * %false if there's no required ordering or @b should run before @a. + * + * If not specified, the default is ordering them according to when they + * became runnable. 
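+ *
+ * For example, a scheduler that tracks its own per-task vruntime might
+ * return whether @a's vruntime is smaller than @b's here, so that
+ * core-sched picks line up with its dispatch order.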
+ */ + bool (*core_sched_before)(struct task_struct *a, struct task_struct *b); + /** * set_cpumask - Set CPU affinity * @p: task to set CPU affinity for @@ -611,6 +629,9 @@ struct sched_ext_entity { u32 kf_mask; /* see scx_kf_mask above */ atomic64_t ops_state; unsigned long runnable_at; +#ifdef CONFIG_SCHED_CORE + u64 core_sched_at; /* see scx_prio_less() */ +#endif /* BPF scheduler modifiable fields */ diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt index 0afcda19bc50..e12a057ead7b 100644 --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -135,7 +135,7 @@ config SCHED_CORE config SCHED_CLASS_EXT bool "Extensible Scheduling Class" - depends on BPF_SYSCALL && BPF_JIT && !SCHED_CORE + depends on BPF_SYSCALL && BPF_JIT help This option enables a new scheduler class sched_ext (SCX), which allows scheduling policies to be implemented as BPF programs to diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 47334e428031..a40b74a2fdbd 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -163,7 +163,12 @@ static inline int __task_prio(struct task_struct *p) if (p->sched_class == &idle_sched_class) return MAX_RT_PRIO + NICE_WIDTH; /* 140 */ - return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */ +#ifdef CONFIG_SCHED_CLASS_EXT + if (p->sched_class == &ext_sched_class) + return MAX_RT_PRIO + MAX_NICE + 1; /* 120, squash ext */ +#endif + + return MAX_RT_PRIO + MAX_NICE; /* 119, squash fair */ } /* @@ -191,6 +196,11 @@ static inline bool prio_less(struct task_struct *a, struct task_struct *b, bool if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */ return cfs_prio_less(a, b, in_fi); +#ifdef CONFIG_SCHED_CLASS_EXT + if (pa == MAX_RT_PRIO + MAX_NICE + 1) /* ext */ + return scx_prio_less(a, b, in_fi); +#endif + return false; } diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index e981b7111e0a..8619eb2dcbd5 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -447,6 +447,44 @@ static int ops_sanitize_err(const char *ops_name, s32 err) return -EPROTO; } +/** + * touch_core_sched - Update timestamp used for core-sched task ordering + * @rq: rq to read clock from, must be locked + * @p: task to update the timestamp for + * + * Update @p->scx.core_sched_at timestamp. This is used by scx_prio_less() to + * implement global or local-DSQ FIFO ordering for core-sched. Should be called + * when a task becomes runnable and its turn on the CPU ends (e.g. slice + * exhaustion). + */ +static void touch_core_sched(struct rq *rq, struct task_struct *p) +{ +#ifdef CONFIG_SCHED_CORE + p->scx.core_sched_at = rq_clock_task(rq); +#endif +} + +/** + * touch_core_sched_dispatch - Update core-sched timestamp on dispatch + * @rq: rq to read clock from, must be locked + * @p: task being dispatched + * + * If the BPF scheduler implements custom core-sched ordering via + * ops.core_sched_before(), @p->scx.core_sched_at is used to implement FIFO + * ordering within each local DSQ. This function is called from dispatch paths + * and updates @p->scx.core_sched_at if custom core-sched ordering is in effect. 
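+ *
+ * With the default global FIFO ordering, the timestamp is deliberately
+ * left untouched on dispatch so that it continues to reflect when the
+ * task became runnable.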
+ */ +static void touch_core_sched_dispatch(struct rq *rq, struct task_struct *p) +{ + lockdep_assert_rq_held(rq); + assert_clock_updated(rq); + +#ifdef CONFIG_SCHED_CORE + if (SCX_HAS_OP(core_sched_before)) + touch_core_sched(rq, p); +#endif +} + static void update_curr_scx(struct rq *rq) { struct task_struct *curr = rq->curr; @@ -462,8 +500,11 @@ static void update_curr_scx(struct rq *rq) account_group_exec_runtime(curr, delta_exec); cgroup_account_cputime(curr, delta_exec); - if (curr->scx.slice != SCX_SLICE_INF) + if (curr->scx.slice != SCX_SLICE_INF) { curr->scx.slice -= min(curr->scx.slice, delta_exec); + if (!curr->scx.slice) + touch_core_sched(rq, curr); + } } static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p, @@ -619,6 +660,8 @@ static void direct_dispatch(struct task_struct *ddsp_task, struct task_struct *p return; } + touch_core_sched_dispatch(task_rq(p), p); + dsq = find_dsq_for_dispatch(task_rq(p), dsq_id, p); dispatch_enqueue(dsq, p, enq_flags | SCX_ENQ_CLEAR_OPSS); @@ -702,12 +745,19 @@ static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags, return; local: + /* + * For task-ordering, slice refill must be treated as implying the end + * of the current slice. Otherwise, the longer @p stays on the CPU, the + * higher priority it becomes from scx_prio_less()'s POV. + */ + touch_core_sched(rq, p); p->scx.slice = SCX_SLICE_DFL; local_norefill: dispatch_enqueue(&rq->scx.local_dsq, p, enq_flags); return; global: + touch_core_sched(rq, p); /* see the comment in local: */ p->scx.slice = SCX_SLICE_DFL; dispatch_enqueue(&scx_dsq_global, p, enq_flags); } @@ -762,6 +812,9 @@ static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags if (SCX_HAS_OP(runnable)) scx_ops.runnable(p, enq_flags); + if (enq_flags & SCX_ENQ_WAKEUP) + touch_core_sched(rq, p); + do_enqueue_task(rq, p, enq_flags, sticky_cpu); } @@ -1201,6 +1254,7 @@ static void finish_dispatch(struct rq *rq, struct rq_flags *rf, struct scx_dispatch_q *dsq; u64 opss; + touch_core_sched_dispatch(rq, p); retry: /* * No need for _acquire here. @p is accessed only after a successful @@ -1278,8 +1332,8 @@ static void flush_dispatch_buf(struct rq *rq, struct rq_flags *rf) dspc->buf_cursor = 0; } -static int balance_scx(struct rq *rq, struct task_struct *prev, - struct rq_flags *rf) +static int balance_one(struct rq *rq, struct task_struct *prev, + struct rq_flags *rf, bool local) { struct scx_rq *scx_rq = &rq->scx; struct scx_dsp_ctx *dspc = this_cpu_ptr(&scx_dsp_ctx); @@ -1302,7 +1356,7 @@ static int balance_scx(struct rq *rq, struct task_struct *prev, } if (prev_on_scx) { - WARN_ON_ONCE(prev->scx.flags & SCX_TASK_BAL_KEEP); + WARN_ON_ONCE(local && (prev->scx.flags & SCX_TASK_BAL_KEEP)); update_curr_scx(rq); /* @@ -1314,10 +1368,16 @@ static int balance_scx(struct rq *rq, struct task_struct *prev, * * See scx_ops_disable_workfn() for the explanation on the * disabling() test. + * + * When balancing a remote CPU for core-sched, there won't be a + * following put_prev_task_scx() call and we don't own + * %SCX_TASK_BAL_KEEP. Instead, pick_task_scx() will test the + * same conditions later and pick @rq->curr accordingly. 
*/ if ((prev->scx.flags & SCX_TASK_QUEUED) && prev->scx.slice && !scx_ops_disabling()) { - prev->scx.flags |= SCX_TASK_BAL_KEEP; + if (local) + prev->scx.flags |= SCX_TASK_BAL_KEEP; return 1; } } @@ -1373,10 +1433,55 @@ static int balance_scx(struct rq *rq, struct task_struct *prev, return 0; } +static int balance_scx(struct rq *rq, struct task_struct *prev, + struct rq_flags *rf) +{ + int ret; + + ret = balance_one(rq, prev, rf, true); + + /* + * When core-sched is enabled, this ops.balance() call will be followed + * by put_prev_scx() and pick_task_scx() on this CPU and pick_task_scx() + * on the SMT siblings. Balance the siblings too. + */ + if (sched_core_enabled(rq)) { + const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq)); + int scpu; + + for_each_cpu_andnot(scpu, smt_mask, cpumask_of(cpu_of(rq))) { + struct rq *srq = cpu_rq(scpu); + struct rq_flags srf; + struct task_struct *sprev = srq->curr; + + /* + * While core-scheduling, rq lock is shared among + * siblings but the debug annotations and rq clock + * aren't. Do pinning dance to transfer the ownership. + */ + WARN_ON_ONCE(__rq_lockp(rq) != __rq_lockp(srq)); + rq_unpin_lock(rq, rf); + rq_pin_lock(srq, &srf); + + update_rq_clock(srq); + balance_one(srq, sprev, &srf, false); + + rq_unpin_lock(srq, &srf); + rq_repin_lock(rq, rf); + } + } + + return ret; +} + static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first) { if (p->scx.flags & SCX_TASK_QUEUED) { - WARN_ON_ONCE(atomic64_read(&p->scx.ops_state) != SCX_OPSS_NONE); + /* + * Core-sched might decide to execute @p before it is + * dispatched. Call ops_dequeue() to notify the BPF scheduler. + */ + ops_dequeue(p, SCX_DEQ_CORE_SCHED_EXEC); dispatch_dequeue(&rq->scx, p); } @@ -1516,6 +1621,69 @@ static struct task_struct *pick_next_task_scx(struct rq *rq) return p; } +#ifdef CONFIG_SCHED_CORE +/** + * scx_prio_less - Task ordering for core-sched + * @a: task A + * @b: task B + * + * Core-sched is implemented as an additional scheduling layer on top of the + * usual sched_class'es and needs to find out the expected task ordering. For + * SCX, core-sched calls this function to interrogate the task ordering. + * + * Unless overridden by ops.core_sched_before(), @p->scx.core_sched_at is used + * to implement the default task ordering. The older the timestamp, the higher + * prority the task - the global FIFO ordering matching the default scheduling + * behavior. + * + * When ops.core_sched_before() is enabled, @p->scx.core_sched_at is used to + * implement FIFO ordering within each local DSQ. See pick_task_scx(). + */ +bool scx_prio_less(struct task_struct *a, struct task_struct *b, bool in_fi) +{ + if (SCX_HAS_OP(core_sched_before) && !scx_ops_disabling()) + return scx_ops.core_sched_before(a, b); + else + return time_after64(a->scx.core_sched_at, b->scx.core_sched_at); +} + +/** + * pick_task_scx - Pick a candidate task for core-sched + * @rq: rq to pick the candidate task from + * + * Core-sched calls this function on each SMT sibling to determine the next + * tasks to run on the SMT siblings. balance_one() has been called on all + * siblings and put_prev_task_scx() has been called only for the current CPU. + * + * As put_prev_task_scx() hasn't been called on remote CPUs, we can't just look + * at the first task in the local dsq. @rq->curr has to be considered explicitly + * to mimic %SCX_TASK_BAL_KEEP. 
+ */ +static struct task_struct *pick_task_scx(struct rq *rq) +{ + struct task_struct *curr = rq->curr; + struct task_struct *first = first_local_task(rq); + + if (curr->scx.flags & SCX_TASK_QUEUED) { + /* is curr the only runnable task? */ + if (!first) + return curr; + + /* + * Does curr trump first? We can always go by core_sched_at for + * this comparison as it represents global FIFO ordering when + * the default core-sched ordering is in used and local-DSQ FIFO + * ordering otherwise. + */ + if (curr->scx.slice && time_before64(curr->scx.core_sched_at, + first->scx.core_sched_at)) + return curr; + } + + return first; /* this may be %NULL */ +} +#endif /* CONFIG_SCHED_CORE */ + static enum scx_cpu_preempt_reason preempt_reason_from_class(const struct sched_class *class) { @@ -1795,11 +1963,13 @@ static void task_tick_scx(struct rq *rq, struct task_struct *curr, int queued) update_curr_scx(rq); /* - * While disabling, always resched as we can't trust the slice - * management. + * While disabling, always resched and refresh core-sched timestamp as + * we can't trust the slice management or ops.core_sched_before(). */ - if (scx_ops_disabling()) + if (scx_ops_disabling()) { curr->scx.slice = 0; + touch_core_sched(rq, curr); + } if (!curr->scx.slice) resched_curr(rq); @@ -2232,6 +2402,10 @@ DEFINE_SCHED_CLASS(ext) = { .rq_offline = rq_offline_scx, #endif +#ifdef CONFIG_SCHED_CORE + .pick_task = pick_task_scx, +#endif + .task_tick = task_tick_scx, .switching_to = switching_to_scx, @@ -2560,9 +2734,11 @@ static void scx_ops_disable_workfn(struct kthread_work *work) * * b. balance_scx() never sets %SCX_TASK_BAL_KEEP as the slice value * can't be trusted. Whenever a tick triggers, the running task is - * rotated to the tail of the queue. + * rotated to the tail of the queue with core_sched_at touched. * * c. pick_next_task() suppresses zero slice warning. + * + * d. scx_prio_less() reverts to the default core_sched_at order. */ scx_ops.enqueue = scx_ops_fallback_enqueue; scx_ops.dispatch = scx_ops_fallback_dispatch; diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index 099e17e92228..c3df39984fc9 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -68,6 +68,14 @@ enum scx_enq_flags { enum scx_deq_flags { /* expose select DEQUEUE_* flags as enums */ SCX_DEQ_SLEEP = DEQUEUE_SLEEP, + + /* high 32bits are SCX specific */ + + /* + * The generic core-sched layer decided to execute the task even though + * it hasn't been dispatched yet. Dequeue from the BPF side. + */ + SCX_DEQ_CORE_SCHED_EXEC = 1LLU << 32, }; enum scx_tg_flags { @@ -173,6 +181,10 @@ static inline const struct sched_class *next_active_class(const struct sched_cla for_active_class_range(class, (prev_class) > &ext_sched_class ? \ &ext_sched_class : (prev_class), (end_class)) +#ifdef CONFIG_SCHED_CORE +bool scx_prio_less(struct task_struct *a, struct task_struct *b, bool in_fi); +#endif + #else /* CONFIG_SCHED_CLASS_EXT */ #define scx_enabled() false diff --git a/tools/sched_ext/scx_example_qmap.bpf.c b/tools/sched_ext/scx_example_qmap.bpf.c index 7e670986542b..7d851fd987ac 100644 --- a/tools/sched_ext/scx_example_qmap.bpf.c +++ b/tools/sched_ext/scx_example_qmap.bpf.c @@ -13,6 +13,7 @@ * - Sleepable per-task storage allocation using ops.prep_enable(). * - Using ops.cpu_release() to handle a higher priority scheduling class taking * the CPU away. + * - Core-sched support. * * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
* Copyright (c) 2022 Tejun Heo @@ -59,9 +60,21 @@ struct { }, }; +/* + * Per-queue sequence numbers to implement core-sched ordering. + * + * Tail seq is assigned to each queued task and incremented. Head seq tracks the + * sequence number of the latest dispatched task. The distance between the a + * task's seq and the associated queue's head seq is called the queue distance + * and used when comparing two tasks for ordering. See qmap_core_sched_before(). + */ +static u64 core_sched_head_seqs[5]; +static u64 core_sched_tail_seqs[5]; + /* Per-task scheduling context */ struct task_ctx { bool force_local; /* Dispatch directly to local_dsq */ + u64 core_sched_seq; }; struct { @@ -81,6 +94,7 @@ struct { /* Statistics */ unsigned long nr_enqueued, nr_dispatched, nr_reenqueued, nr_dequeued; +unsigned long nr_core_sched_execed; s32 BPF_STRUCT_OPS(qmap_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags) @@ -147,8 +161,18 @@ void BPF_STRUCT_OPS(qmap_enqueue, struct task_struct *p, u64 enq_flags) return; } - /* Is select_cpu() is telling us to enqueue locally? */ - if (tctx->force_local) { + /* + * All enqueued tasks must have their core_sched_seq updated for correct + * core-sched ordering, which is why %SCX_OPS_ENQ_LAST is specified in + * qmap_ops.flags. + */ + tctx->core_sched_seq = core_sched_tail_seqs[idx]++; + + /* + * If qmap_select_cpu() is telling us to or this is the last runnable + * task on the CPU, enqueue locally. + */ + if (tctx->force_local || (enq_flags & SCX_ENQ_LAST)) { tctx->force_local = false; scx_bpf_dispatch(p, SCX_DSQ_LOCAL, slice_ns, enq_flags); return; @@ -192,6 +216,19 @@ void BPF_STRUCT_OPS(qmap_enqueue, struct task_struct *p, u64 enq_flags) void BPF_STRUCT_OPS(qmap_dequeue, struct task_struct *p, u64 deq_flags) { __sync_fetch_and_add(&nr_dequeued, 1); + if (deq_flags & SCX_DEQ_CORE_SCHED_EXEC) + __sync_fetch_and_add(&nr_core_sched_execed, 1); +} + +static void update_core_sched_head_seq(struct task_struct *p) +{ + struct task_ctx *tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0); + int idx = weight_to_idx(p->scx.weight); + + if (tctx) + core_sched_head_seqs[idx] = tctx->core_sched_seq; + else + scx_bpf_error("task_ctx lookup failed"); } void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev) @@ -244,6 +281,7 @@ void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev) p = bpf_task_from_pid(pid); if (p) { + update_core_sched_head_seq(p); __sync_fetch_and_add(&nr_dispatched, 1); scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, slice_ns, 0); bpf_task_release(p); @@ -255,6 +293,49 @@ void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev) } } +/* + * The distance from the head of the queue scaled by the weight of the queue. + * The lower the number, the older the task and the higher the priority. + */ +static s64 task_qdist(struct task_struct *p) +{ + int idx = weight_to_idx(p->scx.weight); + struct task_ctx *tctx; + s64 qdist; + + tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0); + if (!tctx) { + scx_bpf_error("task_ctx lookup failed"); + return 0; + } + + qdist = tctx->core_sched_seq - core_sched_head_seqs[idx]; + + /* + * As queue index increments, the priority doubles. The queue w/ index 3 + * is dispatched twice more frequently than 2. Reflect the difference by + * scaling qdists accordingly. Note that the shift amount needs to be + * flipped depending on the sign to avoid flipping priority direction. 
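+ *
+ * For example, a task 8 entries behind the head of queue 2 compares as
+ * 8 << 2 == 32 while the same distance in queue 3 compares as 8 << 1 == 16,
+ * matching queue 3 being dispatched twice as often as queue 2.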
+ */ + if (qdist >= 0) + return qdist << (4 - idx); + else + return qdist << idx; +} + +/* + * This is called to determine the task ordering when core-sched is picking + * tasks to execute on SMT siblings and should encode about the same ordering as + * the regular scheduling path. Use the priority-scaled distances from the head + * of the queues to compare the two tasks which should be consistent with the + * dispatch path behavior. + */ +bool BPF_STRUCT_OPS(qmap_core_sched_before, + struct task_struct *a, struct task_struct *b) +{ + return task_qdist(a) > task_qdist(b); +} + void BPF_STRUCT_OPS(qmap_cpu_release, s32 cpu, struct scx_cpu_release_args *args) { u32 cnt; @@ -306,10 +387,12 @@ struct sched_ext_ops qmap_ops = { .enqueue = (void *)qmap_enqueue, .dequeue = (void *)qmap_dequeue, .dispatch = (void *)qmap_dispatch, + .core_sched_before = (void *)qmap_core_sched_before, .cpu_release = (void *)qmap_cpu_release, .prep_enable = (void *)qmap_prep_enable, .init = (void *)qmap_init, .exit = (void *)qmap_exit, + .flags = SCX_OPS_ENQ_LAST, .timeout_ms = 5000U, .name = "qmap", }; diff --git a/tools/sched_ext/scx_example_qmap.c b/tools/sched_ext/scx_example_qmap.c index de6f03ccb233..02fabe97ac9f 100644 --- a/tools/sched_ext/scx_example_qmap.c +++ b/tools/sched_ext/scx_example_qmap.c @@ -91,9 +91,10 @@ int main(int argc, char **argv) long nr_enqueued = skel->bss->nr_enqueued; long nr_dispatched = skel->bss->nr_dispatched; - printf("enq=%lu, dsp=%lu, delta=%ld, reenq=%lu, deq=%lu\n", + printf("enq=%lu, dsp=%lu, delta=%ld, reenq=%lu, deq=%lu, core=%lu\n", nr_enqueued, nr_dispatched, nr_enqueued - nr_dispatched, - skel->bss->nr_reenqueued, skel->bss->nr_dequeued); + skel->bss->nr_reenqueued, skel->bss->nr_dequeued, + skel->bss->nr_core_sched_execed); fflush(stdout); sleep(1); } From patchwork Sat Jan 28 00:16:37 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119583 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id C6ED8C54EAA for ; Sat, 28 Jan 2023 00:20:24 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S233337AbjA1AUX (ORCPT ); Fri, 27 Jan 2023 19:20:23 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:37276 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S233295AbjA1ATY (ORCPT ); Fri, 27 Jan 2023 19:19:24 -0500 Received: from mail-pf1-x42f.google.com (mail-pf1-x42f.google.com [IPv6:2607:f8b0:4864:20::42f]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 3AAB08CE34; Fri, 27 Jan 2023 16:18:10 -0800 (PST) Received: by mail-pf1-x42f.google.com with SMTP id n2so4365308pfo.3; Fri, 27 Jan 2023 16:18:10 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=pqxJ1g6CxYx/lcyLANomLQ9CE2/5W0IBdo+bU/vDLtw=; b=UtgqatyMcVyO1QOhSaxaHlJiJfTPMttaHqkqWqh17zxD6zTG7ArMDT+q/Cbj4g3+0G ij79NLuTIYrayYQa1eIzpk+/RzOTf/xYCbv+i5Nih3Rvlca9BIQIr1k2u7f6b5IVCs2I oei7qu1WpLhgFYasksBsJGLQal5D2U1Y3AXPPuOV8DfHeXrQsNuN+/6xi4RuuJrdwhNz r43591do9hpcQL3CTwM5aFxpn8zB9VDmYNBIcFRw6i/3rHoN5/T414qlScmCwqN8iMXJ 
From: Tejun Heo
To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com
Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo, Bagas Sanjaya
Subject: [PATCH 28/30] sched_ext: Documentation: scheduler: Document extensible scheduler class
Date: Fri, 27 Jan 2023 14:16:37 -1000
Message-Id: <20230128001639.3510083-29-tj@kernel.org>
In-Reply-To: <20230128001639.3510083-1-tj@kernel.org>
References: <20230128001639.3510083-1-tj@kernel.org>

Add Documentation/scheduler/sched-ext.rst which gives a high-level overview and pointers to the examples.

v2: Apply minor edits suggested by Bagas. Caveats section dropped as all of them are addressed.
Signed-off-by: Tejun Heo Reviewed-by: David Vernet Acked-by: Josh Don Acked-by: Hao Luo Acked-by: Barret Rhoden Cc: Bagas Sanjaya --- Documentation/scheduler/index.rst | 1 + Documentation/scheduler/sched-ext.rst | 224 ++++++++++++++++++++++++++ include/linux/sched/ext.h | 2 + kernel/Kconfig.preempt | 2 + kernel/sched/ext.c | 2 + kernel/sched/ext.h | 2 + 6 files changed, 233 insertions(+) create mode 100644 Documentation/scheduler/sched-ext.rst diff --git a/Documentation/scheduler/index.rst b/Documentation/scheduler/index.rst index b430d856056a..8a27a9967284 100644 --- a/Documentation/scheduler/index.rst +++ b/Documentation/scheduler/index.rst @@ -18,6 +18,7 @@ Linux Scheduler sched-nice-design sched-rt-group sched-stats + sched-ext sched-debug text_files diff --git a/Documentation/scheduler/sched-ext.rst b/Documentation/scheduler/sched-ext.rst new file mode 100644 index 000000000000..8a3626c884e7 --- /dev/null +++ b/Documentation/scheduler/sched-ext.rst @@ -0,0 +1,224 @@ +========================== +Extensible Scheduler Class +========================== + +sched_ext is a scheduler class whose behavior can be defined by a set of BPF +programs - the BPF scheduler. + +* sched_ext exports a full scheduling interface so that any scheduling + algorithm can be implemented on top. + +* The BPF scheduler can group CPUs however it sees fit and schedule them + together, as tasks aren't tied to specific CPUs at the time of wakeup. + +* The BPF scheduler can be turned on and off dynamically anytime. + +* The system integrity is maintained no matter what the BPF scheduler does. + The default scheduling behavior is restored anytime an error is detected, + a runnable task stalls, or on invoking the SysRq key sequence + :kbd:`SysRq-S`. + +Switching to and from sched_ext +=============================== + +``CONFIG_SCHED_CLASS_EXT`` is the config option to enable sched_ext and +``tools/sched_ext`` contains the example schedulers. + +sched_ext is used only when the BPF scheduler is loaded and running. + +If a task explicitly sets its scheduling policy to ``SCHED_EXT``, it will be +treated as ``SCHED_NORMAL`` and scheduled by CFS until the BPF scheduler is +loaded. On load, such tasks will be switched to and scheduled by sched_ext. + +The BPF scheduler can choose to schedule all normal and lower class tasks by +calling ``scx_bpf_switch_all()`` from its ``init()`` operation. In this +case, all ``SCHED_NORMAL``, ``SCHED_BATCH``, ``SCHED_IDLE`` and +``SCHED_EXT`` tasks are scheduled by sched_ext. In the example schedulers, +this mode can be selected with the ``-a`` option. + +Terminating the sched_ext scheduler program, triggering :kbd:`SysRq-S`, or +detection of any internal error including stalled runnable tasks aborts the +BPF scheduler and reverts all tasks back to CFS. + +.. code-block:: none + + # make -j16 -C tools/sched_ext + # tools/sched_ext/scx_example_dummy -a + local=0 global=3 + local=5 global=24 + local=9 global=44 + local=13 global=56 + local=17 global=72 + ^CEXIT: BPF scheduler unregistered + +If ``CONFIG_SCHED_DEBUG`` is set, the current status of the BPF scheduler +and whether a given task is on sched_ext can be determined as follows: + +.. code-block:: none + + # cat /sys/kernel/debug/sched/ext + ops : dummy + enabled : 1 + switching_all : 1 + switched_all : 1 + enable_state : enabled + + # grep ext /proc/self/sched + ext.enabled : 1 + +The Basics +========== + +Userspace can implement an arbitrary BPF scheduler by loading a set of BPF +programs that implement ``struct sched_ext_ops``. 
The only mandatory field +is ``ops.name`` which must be a valid BPF object name. All operations are +optional. The following modified excerpt is from +``tools/sched/scx_example_dummy.bpf.c`` showing a minimal global FIFO +scheduler. + +.. code-block:: c + + s32 BPF_STRUCT_OPS(dummy_init) + { + if (switch_all) + scx_bpf_switch_all(); + return 0; + } + + void BPF_STRUCT_OPS(dummy_enqueue, struct task_struct *p, u64 enq_flags) + { + if (enq_flags & SCX_ENQ_LOCAL) + scx_bpf_dispatch(p, SCX_DSQ_LOCAL, enq_flags); + else + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, enq_flags); + } + + void BPF_STRUCT_OPS(dummy_exit, struct scx_exit_info *ei) + { + exit_type = ei->type; + } + + SEC(".struct_ops") + struct sched_ext_ops dummy_ops = { + .enqueue = (void *)dummy_enqueue, + .init = (void *)dummy_init, + .exit = (void *)dummy_exit, + .name = "dummy", + }; + +Dispatch Queues +--------------- + +To match the impedance between the scheduler core and the BPF scheduler, +sched_ext uses simple FIFOs called DSQs (dispatch queues). By default, there +is one global FIFO (``SCX_DSQ_GLOBAL``), and one local dsq per CPU +(``SCX_DSQ_LOCAL``). The BPF scheduler can manage an arbitrary number of +dsq's using ``scx_bpf_create_dsq()`` and ``scx_bpf_destroy_dsq()``. + +A CPU always executes a task from its local DSQ. A task is "dispatched" to a +DSQ. A non-local DSQ is "consumed" to transfer a task to the consuming CPU's +local DSQ. + +When a CPU is looking for the next task to run, if the local DSQ is not +empty, the first task is picked. Otherwise, the CPU tries to consume the +global DSQ. If that doesn't yield a runnable task either, ``ops.dispatch()`` +is invoked. + +Scheduling Cycle +---------------- + +The following briefly shows how a waking task is scheduled and executed. + +1. When a task is waking up, ``ops.select_cpu()`` is the first operation + invoked. This serves two purposes. First, CPU selection optimization + hint. Second, waking up the selected CPU if idle. + + The CPU selected by ``ops.select_cpu()`` is an optimization hint and not + binding. The actual decision is made at the last step of scheduling. + However, there is a small performance gain if the CPU + ``ops.select_cpu()`` returns matches the CPU the task eventually runs on. + + A side-effect of selecting a CPU is waking it up from idle. While a BPF + scheduler can wake up any cpu using the ``scx_bpf_kick_cpu()`` helper, + using ``ops.select_cpu()`` judiciously can be simpler and more efficient. + + Note that the scheduler core will ignore an invalid CPU selection, for + example, if it's outside the allowed cpumask of the task. + +2. Once the target CPU is selected, ``ops.enqueue()`` is invoked. It can + make one of the following decisions: + + * Immediately dispatch the task to either the global or local DSQ by + calling ``scx_bpf_dispatch()`` with ``SCX_DSQ_GLOBAL`` or + ``SCX_DSQ_LOCAL``, respectively. + + * Immediately dispatch the task to a custom DSQ by calling + ``scx_bpf_dispatch()`` with a DSQ ID which is smaller than 2^63. + + * Queue the task on the BPF side. + +3. When a CPU is ready to schedule, it first looks at its local DSQ. If + empty, it then looks at the global DSQ. If there still isn't a task to + run, ``ops.dispatch()`` is invoked which can use the following two + functions to populate the local DSQ. + + * ``scx_bpf_dispatch()`` dispatches a task to a DSQ. Any target DSQ can + be used - ``SCX_DSQ_LOCAL``, ``SCX_DSQ_LOCAL_ON | cpu``, + ``SCX_DSQ_GLOBAL`` or a custom DSQ. 
While ``scx_bpf_dispatch()`` + currently can't be called with BPF locks held, this is being worked on + and will be supported. ``scx_bpf_dispatch()`` schedules dispatching + rather than performing them immediately. There can be up to + ``ops.dispatch_max_batch`` pending tasks. + + * ``scx_bpf_consume()`` tranfers a task from the specified non-local DSQ + to the dispatching DSQ. This function cannot be called with any BPF + locks held. ``scx_bpf_consume()`` flushes the pending dispatched tasks + before trying to consume the specified DSQ. + +4. After ``ops.dispatch()`` returns, if there are tasks in the local DSQ, + the CPU runs the first one. If empty, the following steps are taken: + + * Try to consume the global DSQ. If successful, run the task. + + * If ``ops.dispatch()`` has dispatched any tasks, retry #3. + + * If the previous task is an SCX task and still runnable, keep executing + it (see ``SCX_OPS_ENQ_LAST``). + + * Go idle. + +Note that the BPF scheduler can always choose to dispatch tasks immediately +in ``ops.enqueue()`` as illustrated in the above dummy example. If only the +built-in DSQs are used, there is no need to implement ``ops.dispatch()`` as +a task is never queued on the BPF scheduler and both the local and global +DSQs are consumed automatically. + +Where to Look +============= + +* ``include/linux/sched/ext.h`` defines the core data structures, ops table + and constants. + +* ``kernel/sched/ext.c`` contains sched_ext core implementation and helpers. + The functions prefixed with ``scx_bpf_`` can be called from the BPF + scheduler. + +* ``tools/sched_ext/`` hosts example BPF scheduler implementations. + + * ``scx_example_dummy[.bpf].c``: Minimal global FIFO scheduler example + using a custom DSQ. + + * ``scx_example_qmap[.bpf].c``: A multi-level FIFO scheduler supporting + five levels of priority implemented with ``BPF_MAP_TYPE_QUEUE``. + +ABI Instability +=============== + +The APIs provided by sched_ext to BPF schedulers programs have no stability +guarantees. This includes the ops table callbacks and constants defined in +``include/linux/sched/ext.h``, as well as the ``scx_bpf_`` kfuncs defined in +``kernel/sched/ext.c``. + +While we will attempt to provide a relatively stable API surface when +possible, they are subject to change without warning between kernel +versions. diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index d3c2701bb4b4..6b230ecdcfa4 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -1,5 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* + * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst + * * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. * Copyright (c) 2022 Tejun Heo * Copyright (c) 2022 David Vernet diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt index e12a057ead7b..bae49b743834 100644 --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -154,3 +154,5 @@ config SCHED_CLASS_EXT wish to implement scheduling policies. The struct_ops structure exported by sched_ext is struct sched_ext_ops, and is conceptually similar to struct sched_class. + + See Documentation/scheduler/sched-ext.rst for more details. diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 8619eb2dcbd5..828082e6e780 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -1,5 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* + * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst + * * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
* Copyright (c) 2022 Tejun Heo * Copyright (c) 2022 David Vernet diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h index c3df39984fc9..4252296ba464 100644 --- a/kernel/sched/ext.h +++ b/kernel/sched/ext.h @@ -1,5 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* + * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst + * * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. * Copyright (c) 2022 Tejun Heo * Copyright (c) 2022 David Vernet From patchwork Sat Jan 28 00:16:38 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119584 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id EE5EFC54EAA for ; Sat, 28 Jan 2023 00:20:33 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S232783AbjA1AUc (ORCPT ); Fri, 27 Jan 2023 19:20:32 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:36746 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S232782AbjA1AUD (ORCPT ); Fri, 27 Jan 2023 19:20:03 -0500 Received: from mail-pg1-x52f.google.com (mail-pg1-x52f.google.com [IPv6:2607:f8b0:4864:20::52f]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id E8BFA8D08F; Fri, 27 Jan 2023 16:18:22 -0800 (PST) Received: by mail-pg1-x52f.google.com with SMTP id r18so4227334pgr.12; Fri, 27 Jan 2023 16:18:22 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=5/pJqABhSvu2flXAOqTCkfW9mcu+fF0mBbb0NhpvQjM=; b=nZ1xJio2tJa531iqy96yP6fXY/ZMnxt5/enOMKDHZZAcZaYWyRgzKhhWGt5vbjIo9P uz53jnjPBla8C60KWTCm04Y6R2gBitqmCr4yt0aoer+nsMoNF5tRI1di5V63b0uBTh9+ JzLyTCUhIXWo4FlNax6AIscKiCdbEx2p4kIvWE2fVyUxPHzfnxH7waZotgWGQYoXdbcZ 39dB9nV2dxjJ95GljMHMMSAYpb4l1RlJ5MeavT7SwH+MWD6AsEaRyI6AgzaifjIuR75S 9xzPrB8rsQDqat1dssj7SYRzO6TT6jY0ROmGHwr2CyoHOh/+OliFGqtM1EvoEbuUz7OY G3WQ== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=5/pJqABhSvu2flXAOqTCkfW9mcu+fF0mBbb0NhpvQjM=; b=1e2sE0QdzyNrPMLtE3EKm8s1J9mmmA3bh1QDWA0rckLM0oUPnG/f9mMynvzhVbuycl eQtew4xJL4sIdC+V+c6wdjuyHNDV/st52eg9H/rEQqsxcxy3lcrPsPvizddnVyykmw4K mCuu/YKCdCpCw+ygSLKHI59pKZ3a7I6Ae/ab0NRMS+NwMSkUq7Y9b9ZzQKVLl/2hX3a9 Z5bwybHtHAorGq/7H/CeaxB332VWZQuuneRyBkeOOqJC0iGeFLoclA2VcItdRHcLc0kw ihW/g2yWg4gezzEcOs0nHK6qvShgD0+d3ykMMv+/MWKCdLyEd5lAFXnbscosrx98IQ3r Tjiw== X-Gm-Message-State: AO0yUKUxFWfwtBNXIKgF2Jh8QRTD54NAPmSUBpU2HmApz9K1ze0zHJPH qrMw0foH/0Yqqg3W2PB6MVo= X-Google-Smtp-Source: AK7set+JCSh26v24zZZDtkWZC4CzgqJxk4kY6saDEL7UThhDorx5l0ayQKmIupOGJycUi860X4emFQ== X-Received: by 2002:aa7:9831:0:b0:58b:5f9d:c2fe with SMTP id q17-20020aa79831000000b0058b5f9dc2femr405524pfl.29.1674865065901; Fri, 27 Jan 2023 16:17:45 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. 
[2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id c62-20020a621c41000000b00580cc63dce8sm3165177pfc.77.2023.01.27.16.17.45 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:45 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 29/30] sched_ext: Add a basic, userland vruntime scheduler Date: Fri, 27 Jan 2023 14:16:38 -1000 Message-Id: <20230128001639.3510083-30-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org From: David Vernet This patch adds a new scx_example_userland BPF scheduler that implements a fairly unsophisticated sorted-list vruntime scheduler in userland to demonstrate how most scheduling decisions can be delegated to userland. The scheduler doesn't implement load balancing, and treats all tasks as part of a single domain. Signed-off-by: David Vernet Reviewed-by: Tejun Heo Signed-off-by: Tejun Heo --- tools/sched_ext/.gitignore | 1 + tools/sched_ext/Makefile | 10 +- tools/sched_ext/scx_example_userland.bpf.c | 275 ++++++++++++ tools/sched_ext/scx_example_userland.c | 403 ++++++++++++++++++ tools/sched_ext/scx_example_userland_common.h | 19 + 5 files changed, 706 insertions(+), 2 deletions(-) create mode 100644 tools/sched_ext/scx_example_userland.bpf.c create mode 100644 tools/sched_ext/scx_example_userland.c create mode 100644 tools/sched_ext/scx_example_userland_common.h diff --git a/tools/sched_ext/.gitignore b/tools/sched_ext/.gitignore index ebc34dcf925b..75a536dfebfc 100644 --- a/tools/sched_ext/.gitignore +++ b/tools/sched_ext/.gitignore @@ -2,6 +2,7 @@ scx_example_dummy scx_example_qmap scx_example_central scx_example_pair +scx_example_userland *.skel.h *.subskel.h /tools/ diff --git a/tools/sched_ext/Makefile b/tools/sched_ext/Makefile index 2303736698a2..fcb4faa75e37 100644 --- a/tools/sched_ext/Makefile +++ b/tools/sched_ext/Makefile @@ -115,7 +115,8 @@ BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) \ -Wno-compare-distinct-pointer-types \ -O2 -mcpu=v3 -all: scx_example_dummy scx_example_qmap scx_example_central scx_example_pair +all: scx_example_dummy scx_example_qmap scx_example_central scx_example_pair \ + scx_example_userland # sort removes libbpf duplicates when not cross-building MAKE_DIRS := $(sort $(BUILD_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \ @@ -182,11 +183,16 @@ scx_example_pair: scx_example_pair.c scx_example_pair.skel.h user_exit_info.h $(CC) $(CFLAGS) -c $< -o $@.o $(CC) -o $@ $@.o $(HOST_BPFOBJ) $(LDFLAGS) +scx_example_userland: scx_example_userland.c scx_example_userland.skel.h \ + scx_example_userland_common.h user_exit_info.h + $(CC) $(CFLAGS) -c $< -o $@.o + $(CC) -o $@ $@.o $(HOST_BPFOBJ) $(LDFLAGS) + clean: rm -rf $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) rm -f *.o *.bpf.o *.skel.h *.subskel.h rm -f scx_example_dummy 
scx_example_qmap scx_example_central \ - scx_example_pair + scx_example_pair scx_example_userland .PHONY: all clean diff --git a/tools/sched_ext/scx_example_userland.bpf.c b/tools/sched_ext/scx_example_userland.bpf.c new file mode 100644 index 000000000000..65362d5d5a60 --- /dev/null +++ b/tools/sched_ext/scx_example_userland.bpf.c @@ -0,0 +1,275 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * A minimal userland scheduler. + * + * In terms of scheduling, this provides two different types of behaviors: + * 1. A global FIFO scheduling order for _any_ tasks that have CPU affinity. + * All such tasks are direct-dispatched from the kernel, and are never + * enqueued in user space. + * 2. A primitive vruntime scheduler that is implemented in user space, for all + * other tasks. + * + * Some parts of this example user space scheduler could be implemented more + * efficiently using more complex and sophisticated data structures. For + * example, rather than using BPF_MAP_TYPE_QUEUE's, + * BPF_MAP_TYPE_{USER_}RINGBUF's could be used for exchanging messages between + * user space and kernel space. Similarly, we use a simple vruntime-sorted list + * in user space, but an rbtree could be used instead. + * + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#include +#include "scx_common.bpf.h" +#include "scx_example_userland_common.h" + +char _license[] SEC("license") = "GPL"; + +const volatile bool switch_all; +const volatile u32 num_possible_cpus; +const volatile s32 usersched_pid; + +/* Stats that are printed by user space. */ +u64 nr_failed_enqueues, nr_kernel_enqueues, nr_user_enqueues; + +struct user_exit_info uei; + +/* + * Whether the user space scheduler needs to be scheduled due to a task being + * enqueued in user space. + */ +static bool usersched_needed; + +/* + * The map containing tasks that are enqueued in user space from the kernel. + * + * This map is drained by the user space scheduler. + */ +struct { + __uint(type, BPF_MAP_TYPE_QUEUE); + __uint(max_entries, USERLAND_MAX_TASKS); + __type(value, struct scx_userland_enqueued_task); +} enqueued SEC(".maps"); + +/* + * The map containing tasks that are dispatched to the kernel from user space. + * + * Drained by the kernel in userland_dispatch(). + */ +struct { + __uint(type, BPF_MAP_TYPE_QUEUE); + __uint(max_entries, USERLAND_MAX_TASKS); + __type(value, s32); +} dispatched SEC(".maps"); + +/* Per-task scheduling context */ +struct task_ctx { + bool force_local; /* Dispatch directly to local DSQ */ +}; + +/* Map that contains task-local storage. */ +struct { + __uint(type, BPF_MAP_TYPE_TASK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct task_ctx); +} task_ctx_stor SEC(".maps"); + +static bool is_usersched_task(const struct task_struct *p) +{ + return p->pid == usersched_pid; +} + +static bool keep_in_kernel(const struct task_struct *p) +{ + return p->nr_cpus_allowed < num_possible_cpus; +} + +static struct task_struct *usersched_task(void) +{ + struct task_struct *p; + + p = bpf_task_from_pid(usersched_pid); + /* + * Should never happen -- the usersched task should always be managed + * by sched_ext. + */ + if (!p) { + scx_bpf_error("Failed to find usersched task %d", usersched_pid); + /* + * We should never hit this path, and we error out of the + * scheduler above just in case, so the scheduler will soon be + * be evicted regardless. 
So as to simplify the logic in the + * caller to not have to check for NULL, return an acquired + * reference to the current task here rather than NULL. + */ + return bpf_task_acquire(bpf_get_current_task_btf()); + } + + return p; +} + +s32 BPF_STRUCT_OPS(userland_select_cpu, struct task_struct *p, + s32 prev_cpu, u64 wake_flags) +{ + if (keep_in_kernel(p)) { + s32 cpu; + struct task_ctx *tctx; + + tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0); + if (!tctx) { + scx_bpf_error("Failed to look up task-local storage for %s", p->comm); + return -ESRCH; + } + + if (p->nr_cpus_allowed == 1 || + scx_bpf_test_and_clear_cpu_idle(prev_cpu)) { + tctx->force_local = true; + return prev_cpu; + } + + cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr); + if (cpu >= 0) { + tctx->force_local = true; + return cpu; + } + } + + return prev_cpu; +} + +static void dispatch_user_scheduler(void) +{ + struct task_struct *p; + + usersched_needed = false; + p = usersched_task(); + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0); + bpf_task_release(p); +} + +static void enqueue_task_in_user_space(struct task_struct *p, u64 enq_flags) +{ + struct scx_userland_enqueued_task task; + + memset(&task, 0, sizeof(task)); + task.pid = p->pid; + task.sum_exec_runtime = p->se.sum_exec_runtime; + task.weight = p->scx.weight; + + if (bpf_map_push_elem(&enqueued, &task, 0)) { + /* + * If we fail to enqueue the task in user space, put it + * directly on the global DSQ. + */ + __sync_fetch_and_add(&nr_failed_enqueues, 1); + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags); + } else { + __sync_fetch_and_add(&nr_user_enqueues, 1); + usersched_needed = true; + } +} + +void BPF_STRUCT_OPS(userland_enqueue, struct task_struct *p, u64 enq_flags) +{ + if (keep_in_kernel(p)) { + u64 dsq_id = SCX_DSQ_GLOBAL; + struct task_ctx *tctx; + + tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0); + if (!tctx) { + scx_bpf_error("Failed to lookup task ctx for %s", p->comm); + return; + } + + if (tctx->force_local) + dsq_id = SCX_DSQ_LOCAL; + tctx->force_local = false; + scx_bpf_dispatch(p, dsq_id, SCX_SLICE_DFL, enq_flags); + __sync_fetch_and_add(&nr_kernel_enqueues, 1); + return; + } else if (!is_usersched_task(p)) { + enqueue_task_in_user_space(p, enq_flags); + } +} + +static int drain_dispatch_q_loopfn(u32 idx, void *data) +{ + s32 cpu = *(s32 *)data; + s32 pid; + struct task_struct *p; + + if (bpf_map_pop_elem(&dispatched, &pid)) + return 1; + + /* + * The task could have exited by the time we get around to dispatching + * it. Treat this as a normal occurrence, and simply move onto the next + * iteration. + */ + p = bpf_task_from_pid(pid); + if (!p) + return 0; + + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0); + bpf_task_release(p); + return 0; +} + +void BPF_STRUCT_OPS(userland_dispatch, s32 cpu, struct task_struct *prev) +{ + if (usersched_needed) + dispatch_user_scheduler(); + + /* XXX: Use an iterator when it's available. 
*/ + bpf_loop(4096, drain_dispatch_q_loopfn, &cpu, 0); +} + +s32 BPF_STRUCT_OPS(userland_prep_enable, struct task_struct *p, + struct scx_enable_args *args) +{ + if (bpf_task_storage_get(&task_ctx_stor, p, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE)) + return 0; + else + return -ENOMEM; +} + +s32 BPF_STRUCT_OPS(userland_init) +{ + int ret; + + if (num_possible_cpus == 0) { + scx_bpf_error("User scheduler # CPUs uninitialized (%d)", + num_possible_cpus); + return -EINVAL; + } + + if (usersched_pid <= 0) { + scx_bpf_error("User scheduler pid uninitialized (%d)", + usersched_pid); + return -EINVAL; + } + + if (switch_all) + scx_bpf_switch_all(); + return 0; +} + +void BPF_STRUCT_OPS(userland_exit, struct scx_exit_info *ei) +{ + uei_record(&uei, ei); +} + +SEC(".struct_ops") +struct sched_ext_ops userland_ops = { + .select_cpu = (void *)userland_select_cpu, + .enqueue = (void *)userland_enqueue, + .dispatch = (void *)userland_dispatch, + .prep_enable = (void *)userland_prep_enable, + .init = (void *)userland_init, + .exit = (void *)userland_exit, + .timeout_ms = 3000, + .name = "userland", +}; diff --git a/tools/sched_ext/scx_example_userland.c b/tools/sched_ext/scx_example_userland.c new file mode 100644 index 000000000000..4ddd257b9e42 --- /dev/null +++ b/tools/sched_ext/scx_example_userland.c @@ -0,0 +1,403 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * A demo sched_ext user space scheduler which provides vruntime semantics + * using a simple ordered-list implementation. + * + * Each CPU in the system resides in a single, global domain. This precludes + * the need to do any load balancing between domains. The scheduler could + * easily be extended to support multiple domains, with load balancing + * happening in user space. + * + * Any task which has any CPU affinity is scheduled entirely in BPF. This + * program only schedules tasks which may run on any CPU. + * + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. + * Copyright (c) 2022 Tejun Heo + * Copyright (c) 2022 David Vernet + */ +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "user_exit_info.h" +#include "scx_example_userland_common.h" +#include "scx_example_userland.skel.h" + +const char help_fmt[] = +"A minimal userland sched_ext scheduler.\n" +"\n" +"See the top-level comment in .bpf.c for more details.\n" +"\n" +"Usage: %s [-a]\n" +"\n" +" -a Switch all tasks\n" +" -b The number of tasks to batch when dispatching.\n" +" Defaults to 8\n" +" -h Display this help and exit\n"; + +/* Defined in UAPI */ +#define SCHED_EXT 7 + +/* Number of tasks to batch when dispatching to user space. */ +static __u32 batch_size = 8; + +static volatile int exit_req; +static int enqueued_fd, dispatched_fd; + +static struct scx_example_userland *skel; +static struct bpf_link *ops_link; + +/* Stats collected in user space. */ +static __u64 nr_vruntime_enqueues, nr_vruntime_dispatches; + +/* The data structure containing tasks that are enqueued in user space. */ +struct enqueued_task { + LIST_ENTRY(enqueued_task) entries; + __u64 sum_exec_runtime; + double vruntime; +}; + +/* + * Use a vruntime-sorted list to store tasks. This could easily be extended to + * a more optimal data structure, such as an rbtree as is done in CFS. We + * currently elect to use a sorted list to simplify the example for + * illustrative purposes. + */ +LIST_HEAD(listhead, enqueued_task); + +/* + * A vruntime-sorted list of tasks. 
The head of the list contains the task with + * the lowest vruntime. That is, the task that has the "highest" claim to be + * scheduled. + */ +static struct listhead vruntime_head = LIST_HEAD_INITIALIZER(vruntime_head); + +/* + * The statically allocated array of tasks. We use a statically allocated list + * here to avoid having to allocate on the enqueue path, which could cause a + * deadlock. A more substantive user space scheduler could e.g. provide a hook + * for newly enabled tasks that are passed to the scheduler from the + * .prep_enable() callback to allows the scheduler to allocate on safe paths. + */ +struct enqueued_task tasks[USERLAND_MAX_TASKS]; + +static double min_vruntime; + +static void sigint_handler(int userland) +{ + exit_req = 1; +} + +static __u32 task_pid(const struct enqueued_task *task) +{ + return ((uintptr_t)task - (uintptr_t)tasks) / sizeof(*task); +} + +static int dispatch_task(s32 pid) +{ + int err; + + err = bpf_map_update_elem(dispatched_fd, NULL, &pid, 0); + if (err) { + fprintf(stderr, "Failed to dispatch task %d\n", pid); + exit_req = 1; + } else { + nr_vruntime_dispatches++; + } + + return err; +} + +static struct enqueued_task *get_enqueued_task(__s32 pid) +{ + if (pid >= USERLAND_MAX_TASKS) + return NULL; + + return &tasks[pid]; +} + +static double calc_vruntime_delta(__u64 weight, __u64 delta) +{ + double weight_f = (double)weight / 100.0; + double delta_f = (double)delta; + + return delta_f / weight_f; +} + +static void update_enqueued(struct enqueued_task *enqueued, const struct scx_userland_enqueued_task *bpf_task) +{ + __u64 delta; + + delta = bpf_task->sum_exec_runtime - enqueued->sum_exec_runtime; + + enqueued->vruntime += calc_vruntime_delta(bpf_task->weight, delta); + if (min_vruntime > enqueued->vruntime) + enqueued->vruntime = min_vruntime; + enqueued->sum_exec_runtime = bpf_task->sum_exec_runtime; +} + +static int vruntime_enqueue(const struct scx_userland_enqueued_task *bpf_task) +{ + struct enqueued_task *curr, *enqueued, *prev; + + curr = get_enqueued_task(bpf_task->pid); + if (!curr) + return ENOENT; + + update_enqueued(curr, bpf_task); + nr_vruntime_enqueues++; + + /* + * Enqueue the task in a vruntime-sorted list. A more optimal data + * structure such as an rbtree could easily be used as well. We elect + * to use a list here simply because it's less code, and thus the + * example is less convoluted and better serves to illustrate what a + * user space scheduler could look like. 
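+ *
+ * Insertion into the sorted list is O(n) in the number of enqueued
+ * tasks; an rbtree would bring this down to O(log n) at the cost of
+ * more code.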
+ */ + + if (LIST_EMPTY(&vruntime_head)) { + LIST_INSERT_HEAD(&vruntime_head, curr, entries); + return 0; + } + + LIST_FOREACH(enqueued, &vruntime_head, entries) { + if (curr->vruntime <= enqueued->vruntime) { + LIST_INSERT_BEFORE(enqueued, curr, entries); + return 0; + } + prev = enqueued; + } + + LIST_INSERT_AFTER(prev, curr, entries); + + return 0; +} + +static void drain_enqueued_map(void) +{ + while (1) { + struct scx_userland_enqueued_task task; + int err; + + if (bpf_map_lookup_and_delete_elem(enqueued_fd, NULL, &task)) + return; + + err = vruntime_enqueue(&task); + if (err) { + fprintf(stderr, "Failed to enqueue task %d: %s\n", + task.pid, strerror(err)); + exit_req = 1; + return; + } + } +} + +static void dispatch_batch(void) +{ + __u32 i; + + for (i = 0; i < batch_size; i++) { + struct enqueued_task *task; + int err; + __s32 pid; + + task = LIST_FIRST(&vruntime_head); + if (!task) + return; + + min_vruntime = task->vruntime; + pid = task_pid(task); + LIST_REMOVE(task, entries); + err = dispatch_task(pid); + if (err) { + fprintf(stderr, "Failed to dispatch task %d in %u\n", + pid, i); + return; + } + } +} + +static void *run_stats_printer(void *arg) +{ + while (!exit_req) { + __u64 nr_failed_enqueues, nr_kernel_enqueues, nr_user_enqueues, total; + + nr_failed_enqueues = skel->bss->nr_failed_enqueues; + nr_kernel_enqueues = skel->bss->nr_kernel_enqueues; + nr_user_enqueues = skel->bss->nr_user_enqueues; + total = nr_failed_enqueues + nr_kernel_enqueues + nr_user_enqueues; + + printf("o-----------------------o\n"); + printf("| BPF ENQUEUES |\n"); + printf("|-----------------------|\n"); + printf("| kern: %10llu |\n", nr_kernel_enqueues); + printf("| user: %10llu |\n", nr_user_enqueues); + printf("| failed: %10llu |\n", nr_failed_enqueues); + printf("| -------------------- |\n"); + printf("| total: %10llu |\n", total); + printf("| |\n"); + printf("|-----------------------|\n"); + printf("| VRUNTIME / USER |\n"); + printf("|-----------------------|\n"); + printf("| enq: %10llu |\n", nr_vruntime_enqueues); + printf("| disp: %10llu |\n", nr_vruntime_dispatches); + printf("o-----------------------o\n"); + printf("\n\n"); + sleep(1); + } + + return NULL; +} + +static int spawn_stats_thread(void) +{ + pthread_t stats_printer; + + return pthread_create(&stats_printer, NULL, run_stats_printer, NULL); +} + +static int bootstrap(int argc, char **argv) +{ + int err; + __u32 opt; + struct sched_param sched_param = { + .sched_priority = sched_get_priority_max(SCHED_EXT), + }; + bool switch_all = false; + + signal(SIGINT, sigint_handler); + signal(SIGTERM, sigint_handler); + libbpf_set_strict_mode(LIBBPF_STRICT_ALL); + + /* + * Enforce that the user scheduler task is managed by sched_ext. The + * task eagerly drains the list of enqueued tasks in its main work + * loop, and then yields the CPU. The BPF scheduler only schedules the + * user space scheduler task when at least one other task in the system + * needs to be scheduled. 
+ */ + err = syscall(__NR_sched_setscheduler, getpid(), SCHED_EXT, &sched_param); + if (err) { + fprintf(stderr, "Failed to set scheduler to SCHED_EXT: %s\n", strerror(err)); + return err; + } + + while ((opt = getopt(argc, argv, "ahb:")) != -1) { + switch (opt) { + case 'a': + switch_all = true; + break; + case 'b': + batch_size = strtoul(optarg, NULL, 0); + break; + default: + fprintf(stderr, help_fmt, basename(argv[0])); + exit(opt != 'h'); + } + } + + /* + * It's not always safe to allocate in a user space scheduler, as an + * enqueued task could hold a lock that we require in order to be able + * to allocate. + */ + err = mlockall(MCL_CURRENT | MCL_FUTURE); + if (err) { + fprintf(stderr, "Failed to prefault and lock address space: %s\n", + strerror(err)); + return err; + } + + skel = scx_example_userland__open(); + if (!skel) { + fprintf(stderr, "Failed to open scheduler: %s\n", strerror(errno)); + return errno; + } + skel->rodata->num_possible_cpus = libbpf_num_possible_cpus(); + assert(skel->rodata->num_possible_cpus > 0); + skel->rodata->usersched_pid = getpid(); + assert(skel->rodata->usersched_pid > 0); + skel->rodata->switch_all = switch_all; + + err = scx_example_userland__load(skel); + if (err) { + fprintf(stderr, "Failed to load scheduler: %s\n", strerror(err)); + goto destroy_skel; + } + + enqueued_fd = bpf_map__fd(skel->maps.enqueued); + dispatched_fd = bpf_map__fd(skel->maps.dispatched); + assert(enqueued_fd > 0); + assert(dispatched_fd > 0); + + err = spawn_stats_thread(); + if (err) { + fprintf(stderr, "Failed to spawn stats thread: %s\n", strerror(err)); + goto destroy_skel; + } + + ops_link = bpf_map__attach_struct_ops(skel->maps.userland_ops); + if (!ops_link) { + fprintf(stderr, "Failed to attach struct ops: %s\n", strerror(errno)); + err = errno; + goto destroy_skel; + } + + return 0; + +destroy_skel: + scx_example_userland__destroy(skel); + exit_req = 1; + return err; +} + +static void sched_main_loop(void) +{ + while (!exit_req) { + /* + * Perform the following work in the main user space scheduler + * loop: + * + * 1. Drain all tasks from the enqueued map, and enqueue them + * to the vruntime sorted list. + * + * 2. Dispatch a batch of tasks from the vruntime sorted list + * down to the kernel. + * + * 3. Yield the CPU back to the system. The BPF scheduler will + * reschedule the user space scheduler once another task has + * been enqueued to user space. + */ + drain_enqueued_map(); + dispatch_batch(); + sched_yield(); + } +} + +int main(int argc, char **argv) +{ + int err; + + err = bootstrap(argc, argv); + if (err) { + fprintf(stderr, "Failed to bootstrap scheduler: %s\n", strerror(err)); + return err; + } + + sched_main_loop(); + + exit_req = 1; + bpf_link__destroy(ops_link); + uei_print(&skel->bss->uei); + scx_example_userland__destroy(skel); + return 0; +} diff --git a/tools/sched_ext/scx_example_userland_common.h b/tools/sched_ext/scx_example_userland_common.h new file mode 100644 index 000000000000..639c6809c5ff --- /dev/null +++ b/tools/sched_ext/scx_example_userland_common.h @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2022 Meta, Inc */ + +#ifndef __SCX_USERLAND_COMMON_H +#define __SCX_USERLAND_COMMON_H + +#define USERLAND_MAX_TASKS 8192 + +/* + * An instance of a task that has been enqueued by the kernel for consumption + * by a user space global scheduler thread. 
+ */ +struct scx_userland_enqueued_task { + __s32 pid; + u64 sum_exec_runtime; + u64 weight; +}; + +#endif // __SCX_USERLAND_COMMON_H From patchwork Sat Jan 28 00:16:39 2023 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Tejun Heo X-Patchwork-Id: 13119585 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id 01FAFC54EAA for ; Sat, 28 Jan 2023 00:20:38 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S233032AbjA1AUh (ORCPT ); Fri, 27 Jan 2023 19:20:37 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:36582 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S233044AbjA1AUI (ORCPT ); Fri, 27 Jan 2023 19:20:08 -0500 Received: from mail-pf1-x42e.google.com (mail-pf1-x42e.google.com [IPv6:2607:f8b0:4864:20::42e]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id C06258D0BF; Fri, 27 Jan 2023 16:18:27 -0800 (PST) Received: by mail-pf1-x42e.google.com with SMTP id g9so4368496pfo.5; Fri, 27 Jan 2023 16:18:27 -0800 (PST) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:from:to:cc:subject:date :message-id:reply-to; bh=/RFQgOqIi06BD6hRbYTcXyM+6XH+5DwTwQb1rdOUUH8=; b=X+NrKfDHMRzShdpNgAdJdKZMwXohdlp67oADr7COS7UF1nFdOtfymk9cwn2wvIKIXu SeCh/3/K+sx1q4Sbv401cEs4lsCuoGW8Dr6pZPz6CjHkX8B6Q3LgsHffdSpk40wqQNCv 0GPJ98ySooHtgoeO3FTYxzRaHARa/Bur4Au9VrFK/cnnnoWRBX7jGiqLIxqdbi6bfx8m 0+Lwk9bi83fje2RJ+4CGCwKZXWe8p3xMOIwTfPZS/+rCZT7c6Cv1Sx7UheaTOxI2IgDh Anp5kdHfT3Ta9RT5ZL+tGpO6VoukZjhHH9QFE3canXlh5fryAbhXs/IascqBKB7Evo5h r2pQ== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20210112; h=content-transfer-encoding:mime-version:references:in-reply-to :message-id:date:subject:cc:to:from:sender:x-gm-message-state:from :to:cc:subject:date:message-id:reply-to; bh=/RFQgOqIi06BD6hRbYTcXyM+6XH+5DwTwQb1rdOUUH8=; b=SUiRBXE/4YymWjFS9bXQ/nRxLw7v4PFPP9tyAKuRhtwVBtPPwAzIm/Zjzd0u/tJQev JrYWwwmVZdQ++VjjlJGY0msU+HfxxqoSOazt+WP90royPHr3D/ESjAUDSRKaQ5w/nmqw 8RHH7AfmxKL3+YFOX3ifJ08//GwfPd9M9VXCa8zIwvP2QuL1vkf756SXgMqiSw5XewUg 7Abb0y138b1PjVRjXeaoqnNoZcvradirp5nh0QAzUie0Jk5z5Tojl5JNa40VJ3onxO/A 7JZy7vWiUxIALqubvWeGLDW9AqV9zf2CTdrYYI0uM9WpglCMAzs6ceTb4OpQvUVMiuAa mtTw== X-Gm-Message-State: AO0yUKV4417MMpm25Xl8Zq0zwPMGO737n702ZJxdByrM7dhDnRU6cnr2 Rdnfi/XAxPfvQKjbK/w5G+HqY83kXT8= X-Google-Smtp-Source: AK7set+u2mKT2yfeTuY/fzPJvKhWdsKHyMH137ClVvwq6ZjC6HMLcFEvlkqGujxrk8ARz5jXhIQWDA== X-Received: by 2002:aa7:85c3:0:b0:592:629a:c9b5 with SMTP id z3-20020aa785c3000000b00592629ac9b5mr3592068pfn.14.1674865067876; Fri, 27 Jan 2023 16:17:47 -0800 (PST) Received: from localhost (2603-800c-1a02-1bae-a7fa-157f-969a-4cde.res6.spectrum.com. 
[2603:800c:1a02:1bae:a7fa:157f:969a:4cde]) by smtp.gmail.com with ESMTPSA id a139-20020a621a91000000b0058e12371d96sm3137584pfa.164.2023.01.27.16.17.47 (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256); Fri, 27 Jan 2023 16:17:47 -0800 (PST) Sender: Tejun Heo From: Tejun Heo To: torvalds@linux-foundation.org, mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com, vincent.guittot@linaro.org, dietmar.eggemann@arm.com, rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de, bristot@redhat.com, vschneid@redhat.com, ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org, martin.lau@kernel.org, joshdon@google.com, brho@google.com, pjt@google.com, derkling@google.com, haoluo@google.com, dvernet@meta.com, dschatzberg@meta.com, dskarlat@cs.cmu.edu, riel@surriel.com Cc: linux-kernel@vger.kernel.org, bpf@vger.kernel.org, kernel-team@meta.com, Tejun Heo Subject: [PATCH 30/30] sched_ext: Add a rust userspace hybrid example scheduler Date: Fri, 27 Jan 2023 14:16:39 -1000 Message-Id: <20230128001639.3510083-31-tj@kernel.org> X-Mailer: git-send-email 2.39.1 In-Reply-To: <20230128001639.3510083-1-tj@kernel.org> References: <20230128001639.3510083-1-tj@kernel.org> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: bpf@vger.kernel.org From: Dan Schatzberg Atropos is a multi-domain BPF / userspace hybrid scheduler where the BPF part does simple round robin in each domain and the userspace part calculates the load factor of each domain and tells the BPF part how to load balance the domains. This scheduler demonstrates dividing scheduling logic between BPF and userspace and using rust to build the userspace part. An earlier variant of this scheduler was used to balance across six domains, each representing a chiplet in a six-chiplet AMD processor, and could match the performance of production setup using CFS. v2: Updated to use generic BPF cpumask helpers. 
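For readers skimming the diff: the BPF <-> userspace contract is deliberately
small. Userspace publishes per-task migration decisions into the lb_data hash
map (pid -> target domain id), and the BPF side picks them up on the task's
next wakeup. A condensed sketch of that kernel-side check, using the map and
helper names from the patch below (not a literal excerpt):

	pid_t pid = p->pid;
	struct task_ctx *tctx = bpf_map_lookup_elem(&task_data, &pid);
	u32 *new_dom = bpf_map_lookup_elem(&lb_data, &pid);

	if (tctx && new_dom && *new_dom != tctx->dom_id)
		task_set_dq(tctx, p, *new_dom); /* move to the domain chosen by userspace */

Everything else, i.e. measuring load and deciding which tasks to move, lives
in the Rust binary.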
Signed-off-by: Dan Schatzberg Signed-off-by: Tejun Heo --- tools/sched_ext/Makefile | 13 +- tools/sched_ext/atropos/.gitignore | 3 + tools/sched_ext/atropos/Cargo.toml | 28 + tools/sched_ext/atropos/build.rs | 70 ++ tools/sched_ext/atropos/rustfmt.toml | 8 + tools/sched_ext/atropos/src/bpf/atropos.bpf.c | 632 +++++++++++++++++ tools/sched_ext/atropos/src/bpf/atropos.h | 44 ++ tools/sched_ext/atropos/src/main.rs | 648 ++++++++++++++++++ .../sched_ext/atropos/src/oss/atropos_sys.rs | 10 + tools/sched_ext/atropos/src/oss/mod.rs | 29 + tools/sched_ext/atropos/src/util.rs | 24 + 11 files changed, 1507 insertions(+), 2 deletions(-) create mode 100644 tools/sched_ext/atropos/.gitignore create mode 100644 tools/sched_ext/atropos/Cargo.toml create mode 100644 tools/sched_ext/atropos/build.rs create mode 100644 tools/sched_ext/atropos/rustfmt.toml create mode 100644 tools/sched_ext/atropos/src/bpf/atropos.bpf.c create mode 100644 tools/sched_ext/atropos/src/bpf/atropos.h create mode 100644 tools/sched_ext/atropos/src/main.rs create mode 100644 tools/sched_ext/atropos/src/oss/atropos_sys.rs create mode 100644 tools/sched_ext/atropos/src/oss/mod.rs create mode 100644 tools/sched_ext/atropos/src/util.rs diff --git a/tools/sched_ext/Makefile b/tools/sched_ext/Makefile index fcb4faa75e37..0ae20dc0f10d 100644 --- a/tools/sched_ext/Makefile +++ b/tools/sched_ext/Makefile @@ -85,6 +85,8 @@ CFLAGS += -g -O2 -rdynamic -pthread -Wall -Werror $(GENFLAGS) \ -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \ -I$(TOOLSINCDIR) -I$(APIDIR) +CARGOFLAGS := --release + # Silence some warnings when compiled with clang ifneq ($(LLVM),) CFLAGS += -Wno-unused-command-line-argument @@ -116,7 +118,7 @@ BPF_CFLAGS = -g -D__TARGET_ARCH_$(SRCARCH) \ -O2 -mcpu=v3 all: scx_example_dummy scx_example_qmap scx_example_central scx_example_pair \ - scx_example_userland + scx_example_userland atropos # sort removes libbpf duplicates when not cross-building MAKE_DIRS := $(sort $(BUILD_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \ @@ -188,13 +190,20 @@ scx_example_userland: scx_example_userland.c scx_example_userland.skel.h \ $(CC) $(CFLAGS) -c $< -o $@.o $(CC) -o $@ $@.o $(HOST_BPFOBJ) $(LDFLAGS) +atropos: export RUSTFLAGS = -C link-args=-lzstd -C link-args=-lz -C link-args=-lelf -L $(BPFOBJ_DIR) +atropos: export ATROPOS_CLANG = $(CLANG) +atropos: export ATROPOS_BPF_CFLAGS = $(BPF_CFLAGS) +atropos: $(INCLUDE_DIR)/vmlinux.h + cargo build --manifest-path=atropos/Cargo.toml $(CARGOFLAGS) + clean: + cargo clean --manifest-path=atropos/Cargo.toml rm -rf $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) rm -f *.o *.bpf.o *.skel.h *.subskel.h rm -f scx_example_dummy scx_example_qmap scx_example_central \ scx_example_pair scx_example_userland -.PHONY: all clean +.PHONY: all atropos clean # delete failed targets .DELETE_ON_ERROR: diff --git a/tools/sched_ext/atropos/.gitignore b/tools/sched_ext/atropos/.gitignore new file mode 100644 index 000000000000..186dba259ec2 --- /dev/null +++ b/tools/sched_ext/atropos/.gitignore @@ -0,0 +1,3 @@ +src/bpf/.output +Cargo.lock +target diff --git a/tools/sched_ext/atropos/Cargo.toml b/tools/sched_ext/atropos/Cargo.toml new file mode 100644 index 000000000000..fcfce056c741 --- /dev/null +++ b/tools/sched_ext/atropos/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "atropos-bin" +version = "0.5.0" +authors = ["Dan Schatzberg ", "Meta"] +edition = "2021" +description = "Userspace scheduling with BPF" +license = "GPL-2.0-only" + +[dependencies] +anyhow = "1.0.65" +bitvec = { version = "1.0", features = ["serde"] } +clap = { version = "3.2.17", 
features = ["derive", "env", "regex", "unicode", "wrap_help"] } +ctrlc = { version = "3.1", features = ["termination"] } +fb_procfs = { git = "https://github.com/facebookincubator/below.git", rev = "f305730"} +hex = "0.4.3" +libbpf-rs = "0.19.1" +libbpf-sys = { version = "1.0.4", features = ["novendor", "static"] } +libc = "0.2.137" +slog = { version = "2.7", features = ["max_level_trace", "nested-values"] } +slog-async = { version = "2.3", features = ["nested-values"] } +slog-term = "2.8" + +[build-dependencies] +bindgen = { version = "0.61.0", features = ["logging", "static"], default-features = false } +libbpf-cargo = "0.13.0" + +[features] +enable_backtrace = [] diff --git a/tools/sched_ext/atropos/build.rs b/tools/sched_ext/atropos/build.rs new file mode 100644 index 000000000000..26e792c5e17e --- /dev/null +++ b/tools/sched_ext/atropos/build.rs @@ -0,0 +1,70 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. + +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2. +extern crate bindgen; + +use std::env; +use std::fs::create_dir_all; +use std::path::Path; +use std::path::PathBuf; + +use libbpf_cargo::SkeletonBuilder; + +const HEADER_PATH: &str = "src/bpf/atropos.h"; + +fn bindgen_atropos() { + // Tell cargo to invalidate the built crate whenever the wrapper changes + println!("cargo:rerun-if-changed={}", HEADER_PATH); + + // The bindgen::Builder is the main entry point + // to bindgen, and lets you build up options for + // the resulting bindings. + let bindings = bindgen::Builder::default() + // The input header we would like to generate + // bindings for. + .header(HEADER_PATH) + // Tell cargo to invalidate the built crate whenever any of the + // included header files changed. + .parse_callbacks(Box::new(bindgen::CargoCallbacks)) + // Finish the builder and generate the bindings. + .generate() + // Unwrap the Result and panic on failure. + .expect("Unable to generate bindings"); + + // Write the bindings to the $OUT_DIR/bindings.rs file. + let out_path = PathBuf::from(env::var("OUT_DIR").unwrap()); + bindings + .write_to_file(out_path.join("atropos-sys.rs")) + .expect("Couldn't write bindings!"); +} + +fn gen_bpf_sched(name: &str) { + let bpf_cflags = env::var("ATROPOS_BPF_CFLAGS").unwrap(); + let clang = env::var("ATROPOS_CLANG").unwrap(); + eprintln!("{}", clang); + let outpath = format!("./src/bpf/.output/{}.skel.rs", name); + let skel = Path::new(&outpath); + let src = format!("./src/bpf/{}.bpf.c", name); + SkeletonBuilder::new() + .source(src.clone()) + .clang(clang) + .clang_args(bpf_cflags) + .build_and_generate(&skel) + .unwrap(); + println!("cargo:rerun-if-changed={}", src); +} + +fn main() { + bindgen_atropos(); + // It's unfortunate we cannot use `OUT_DIR` to store the generated skeleton. + // Reasons are because the generated skeleton contains compiler attributes + // that cannot be `include!()`ed via macro. And we cannot use the `#[path = "..."]` + // trick either because you cannot yet `concat!(env!("OUT_DIR"), "/skel.rs")` inside + // the path attribute either (see https://github.com/rust-lang/rust/pull/83366). + // + // However, there is hope! When the above feature stabilizes we can clean this + // all up. 
+ create_dir_all("./src/bpf/.output").unwrap(); + gen_bpf_sched("atropos"); +} diff --git a/tools/sched_ext/atropos/rustfmt.toml b/tools/sched_ext/atropos/rustfmt.toml new file mode 100644 index 000000000000..b7258ed0a8d8 --- /dev/null +++ b/tools/sched_ext/atropos/rustfmt.toml @@ -0,0 +1,8 @@ +# Get help on options with `rustfmt --help=config` +# Please keep these in alphabetical order. +edition = "2021" +group_imports = "StdExternalCrate" +imports_granularity = "Item" +merge_derives = false +use_field_init_shorthand = true +version = "Two" diff --git a/tools/sched_ext/atropos/src/bpf/atropos.bpf.c b/tools/sched_ext/atropos/src/bpf/atropos.bpf.c new file mode 100644 index 000000000000..17b89d57e487 --- /dev/null +++ b/tools/sched_ext/atropos/src/bpf/atropos.bpf.c @@ -0,0 +1,632 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. + +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2. +// +// Atropos is a multi-domain BPF / userspace hybrid scheduler where the BPF +// part does simple round robin in each domain and the userspace part +// calculates the load factor of each domain and tells the BPF part how to load +// balance the domains. +// +// Every task has an entry in the task_data map which lists which domain the +// task belongs to. When a task first enters the system (atropos_prep_enable), +// they are round-robined to a domain. +// +// atropos_select_cpu is the primary scheduling logic, invoked when a task +// becomes runnable. The lb_data map is populated by userspace to inform the BPF +// scheduler that a task should be migrated to a new domain. Otherwise, the task +// is scheduled in priority order as follows: +// * The current core if the task was woken up synchronously and there are idle +// cpus in the system +// * The previous core, if idle +// * The pinned-to core if the task is pinned to a specific core +// * Any idle cpu in the domain +// +// If none of the above conditions are met, then the task is enqueued to a +// dispatch queue corresponding to the domain (atropos_enqueue). +// +// atropos_dispatch will attempt to consume a task from its domain's +// corresponding dispatch queue (this occurs after scheduling any tasks directly +// assigned to it due to the logic in atropos_select_cpu). If no task is found, +// then greedy load stealing will attempt to find a task on another dispatch +// queue to run. +// +// Load balancing is almost entirely handled by userspace. BPF populates the +// task weight, dom mask and current dom in the task_data map and executes the +// load balance based on userspace populating the lb_data map. +#include "../../../scx_common.bpf.h" +#include "atropos.h" + +#include +#include +#include +#include +#include +#include + +char _license[] SEC("license") = "GPL"; + +/* + * const volatiles are set during initialization and treated as consts by the + * jit compiler. 
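+ * (They are filled in by the userspace loader through the skeleton's rodata
+ * before the BPF program is loaded; see atropos/src/main.rs.)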
+ */ + +/* + * Domains and cpus + */ +const volatile __u32 nr_doms; +const volatile __u32 nr_cpus; +const volatile __u32 cpu_dom_id_map[MAX_CPUS]; +const volatile __u64 dom_cpumasks[MAX_DOMS][MAX_CPUS / 64]; + +const volatile bool switch_all; +const volatile __u64 greedy_threshold = (u64)-1; + +/* base slice duration */ +const volatile __u64 slice_us = 20000; + +/* + * Exit info + */ +int exit_type = SCX_EXIT_NONE; +char exit_msg[SCX_EXIT_MSG_LEN]; + +struct pcpu_ctx { + __u32 dom_rr_cur; /* used when scanning other doms */ + + /* libbpf-rs does not respect the alignment, so pad out the struct explicitly */ + __u8 _padding[CACHELINE_SIZE - sizeof(u64)]; +} __attribute__((aligned(CACHELINE_SIZE))); + +struct pcpu_ctx pcpu_ctx[MAX_CPUS]; + +struct dom_cpumask { + struct bpf_cpumask __kptr_ref *cpumask; +}; + +/* + * Domain cpumasks + */ +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, u32); + __type(value, struct dom_cpumask); + __uint(max_entries, MAX_DOMS); + __uint(map_flags, 0); +} dom_cpumask_map SEC(".maps"); + +/* + * Statistics + */ +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(u64)); + __uint(max_entries, ATROPOS_NR_STATS); +} stats SEC(".maps"); + +static inline void stat_add(enum stat_idx idx, u64 addend) +{ + u32 idx_v = idx; + + u64 *cnt_p = bpf_map_lookup_elem(&stats, &idx_v); + if (cnt_p) + (*cnt_p) += addend; +} + +// Map pid -> task_ctx +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, pid_t); + __type(value, struct task_ctx); + __uint(max_entries, 1000000); + __uint(map_flags, 0); +} task_data SEC(".maps"); + +// This is populated from userspace to indicate which pids should be reassigned +// to new doms +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, pid_t); + __type(value, u32); + __uint(max_entries, 1000); + __uint(map_flags, 0); +} lb_data SEC(".maps"); + +struct refresh_task_cpumask_loop_ctx { + struct task_struct *p; + struct task_ctx *ctx; +}; + +static void task_set_dq(struct task_ctx *task_ctx, struct task_struct *p, + u32 dom_id) +{ + struct dom_cpumask *dom_cpumask; + struct bpf_cpumask *d_cpumask, *t_cpumask; + + dom_cpumask = bpf_map_lookup_elem(&dom_cpumask_map, &dom_id); + if (!dom_cpumask) { + scx_bpf_error("Failed to look up domain %u cpumask", dom_id); + return; + } + + d_cpumask = bpf_cpumask_kptr_get(&dom_cpumask->cpumask); + if (!d_cpumask) { + scx_bpf_error("Failed to get domain %u cpumask kptr", dom_id); + return; + } + + t_cpumask = bpf_cpumask_kptr_get(&task_ctx->cpumask); + if (!t_cpumask) { + scx_bpf_error("Failed to look up task cpumask"); + bpf_cpumask_release(d_cpumask); + return; + } + + bpf_cpumask_and(t_cpumask, (const struct cpumask *)d_cpumask, p->cpus_ptr); + bpf_cpumask_release(d_cpumask); + bpf_cpumask_release(t_cpumask); +} + +s32 BPF_STRUCT_OPS(atropos_select_cpu, struct task_struct *p, int prev_cpu, + u32 wake_flags) +{ + s32 cpu; + pid_t pid = p->pid; + struct task_ctx *task_ctx = bpf_map_lookup_elem(&task_data, &pid); + struct bpf_cpumask *p_cpumask; + + if (!task_ctx) { + stat_add(ATROPOS_STAT_TASK_GET_ERR, 1); + return prev_cpu; + } + + bool load_balanced = false; + u32 *new_dom = bpf_map_lookup_elem(&lb_data, &pid); + if (new_dom && *new_dom != task_ctx->dom_id) { + task_set_dq(task_ctx, p, *new_dom); + stat_add(ATROPOS_STAT_LOAD_BALANCE, 1); + load_balanced = true; + } + + /* + * If WAKE_SYNC and the machine isn't fully saturated, wake up @p to the + * local dq of the waker. 
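+ * (Note: SCX_WAKE_SYNC indicates a synchronous wakeup, i.e. the waker is
+ * expected to sleep shortly, which is why a local dispatch is attractive.)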
+ */ + if (p->nr_cpus_allowed > 1 && (wake_flags & SCX_WAKE_SYNC)) { + struct task_struct *current = (void *)bpf_get_current_task(); + + if (!(BPF_CORE_READ(current, flags) & PF_EXITING) && + task_ctx->dom_id < MAX_DOMS) { + struct dom_cpumask *dmask_wrapper; + struct bpf_cpumask *d_cpumask; + + dmask_wrapper = bpf_map_lookup_elem(&dom_cpumask_map, &task_ctx->dom_id); + if (!dmask_wrapper) { + scx_bpf_error("Failed to query for domain %u cpumask", + task_ctx->dom_id); + return prev_cpu; + } + d_cpumask = bpf_cpumask_kptr_get(&dmask_wrapper->cpumask); + if (!d_cpumask) { + scx_bpf_error("Failed to acquire domain %u cpumask kptr", + task_ctx->dom_id); + return prev_cpu; + } + + cpu = scx_bpf_pick_idle_cpu(&d_cpumask->cpumask); + bpf_cpumask_release(d_cpumask); + if (bpf_cpumask_test_cpu(cpu, p->cpus_ptr)) { + stat_add(ATROPOS_STAT_WAKE_SYNC, 1); + goto local; + } + } + } + + /* if the previous CPU is idle, dispatch directly to it */ + if (!load_balanced) { + u8 prev_idle = scx_bpf_test_and_clear_cpu_idle(prev_cpu); + if (*(volatile u8 *)&prev_idle) { + stat_add(ATROPOS_STAT_PREV_IDLE, 1); + cpu = prev_cpu; + goto local; + } + } + + /* If only one core is allowed, dispatch */ + p_cpumask = bpf_cpumask_kptr_get(&task_ctx->cpumask); + if (!p_cpumask) { + scx_bpf_error("Failed to acquire task %s cpumask kptr", + p->comm); + return prev_cpu; + } + if (p->nr_cpus_allowed == 1) { + cpu = bpf_cpumask_first(p_cpumask); + bpf_cpumask_release(p_cpumask); + stat_add(ATROPOS_STAT_PINNED, 1); + goto local; + } + + /* Find an idle cpu and just dispatch */ + cpu = scx_bpf_pick_idle_cpu(p_cpumask); + bpf_cpumask_release(p_cpumask); + if (cpu >= 0) { + stat_add(ATROPOS_STAT_DIRECT_DISPATCH, 1); + goto local; + } + + return prev_cpu; + +local: + task_ctx->dispatch_local = true; + return cpu; +} + +void BPF_STRUCT_OPS(atropos_enqueue, struct task_struct *p, u32 enq_flags) +{ + p->scx.slice = slice_us * 1000; + + pid_t pid = p->pid; + struct task_ctx *task_ctx = bpf_map_lookup_elem(&task_data, &pid); + if (!task_ctx) { + stat_add(ATROPOS_STAT_TASK_GET_ERR, 1); + scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags); + return; + } + + if (task_ctx->dispatch_local) { + task_ctx->dispatch_local = false; + scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, enq_flags); + return; + } + + scx_bpf_dispatch(p, task_ctx->dom_id, SCX_SLICE_DFL, enq_flags); +} + +static u32 cpu_to_dom_id(s32 cpu) +{ + if (nr_doms <= 1) + return 0; + + if (cpu >= 0 && cpu < MAX_CPUS) { + u32 dom_id; + + /* + * XXX - idk why the verifier thinks cpu_dom_id_map[cpu] is not + * safe here. + */ + bpf_probe_read_kernel(&dom_id, sizeof(dom_id), + (const void *)&cpu_dom_id_map[cpu]); + return dom_id; + } else { + return MAX_DOMS; + } +} + +static bool is_cpu_in_dom(u32 cpu, u32 dom_id) +{ + u64 mask = 0; + + /* + * XXX - derefing two dimensional array triggers the verifier, use + * probe_read instead. 
+ */ + bpf_probe_read_kernel(&mask, sizeof(mask), + (const void *)&dom_cpumasks[dom_id][cpu / 64]); + return mask & (1LLU << (cpu % 64)); +} + +struct cpumask_intersects_domain_loop_ctx { + const struct cpumask *cpumask; + u32 dom_id; + bool ret; +}; + +static int cpumask_intersects_domain_loopfn(u32 idx, void *data) +{ + struct cpumask_intersects_domain_loop_ctx *lctx = data; + const struct cpumask *cpumask = lctx->cpumask; + + if (bpf_cpumask_test_cpu(idx, cpumask) && + is_cpu_in_dom(idx, lctx->dom_id)) { + lctx->ret = true; + return 1; + } + return 0; +} + +static bool cpumask_intersects_domain(const struct cpumask *cpumask, u32 dom_id) +{ + struct cpumask_intersects_domain_loop_ctx lctx = { + .cpumask = cpumask, + .dom_id = dom_id, + .ret = false, + }; + + bpf_loop(nr_cpus, cpumask_intersects_domain_loopfn, &lctx, 0); + return lctx.ret; +} + +static u32 dom_rr_next(s32 cpu) +{ + if (cpu >= 0 && cpu < MAX_CPUS) { + struct pcpu_ctx *pcpuc = &pcpu_ctx[cpu]; + u32 dom_id = (pcpuc->dom_rr_cur + 1) % nr_doms; + + if (dom_id == cpu_to_dom_id(cpu)) + dom_id = (dom_id + 1) % nr_doms; + + pcpuc->dom_rr_cur = dom_id; + return dom_id; + } + return 0; +} + +static int greedy_loopfn(s32 idx, void *data) +{ + u32 dom_id = dom_rr_next(*(s32 *)data); + + if (scx_bpf_dsq_nr_queued(dom_id) > greedy_threshold && + scx_bpf_consume(dom_id)) { + stat_add(ATROPOS_STAT_GREEDY, 1); + return 1; + } + return 0; +} + +void BPF_STRUCT_OPS(atropos_dispatch, s32 cpu, struct task_struct *prev) +{ + u32 dom = cpu_to_dom_id(cpu); + if (scx_bpf_consume(dom)) { + stat_add(ATROPOS_STAT_DSQ_DISPATCH, 1); + return; + } + + if (greedy_threshold != (u64)-1) + bpf_loop(nr_doms - 1, greedy_loopfn, &cpu, 0); +} + +struct pick_task_domain_loop_ctx { + struct task_struct *p; + const struct cpumask *cpumask; + u64 dom_mask; + u32 dom_rr_base; + u32 dom_id; +}; + +static int pick_task_domain_loopfn(u32 idx, void *data) +{ + struct pick_task_domain_loop_ctx *lctx = data; + u32 dom_id = (lctx->dom_rr_base + idx) % nr_doms; + + if (dom_id >= MAX_DOMS) + return 1; + + if (cpumask_intersects_domain(lctx->cpumask, dom_id)) { + lctx->dom_mask |= 1LLU << dom_id; + if (lctx->dom_id == MAX_DOMS) + lctx->dom_id = dom_id; + } + return 0; +} + +static u32 pick_task_domain(struct task_ctx *task_ctx, struct task_struct *p, + const struct cpumask *cpumask) +{ + struct pick_task_domain_loop_ctx lctx = { + .p = p, + .cpumask = cpumask, + .dom_id = MAX_DOMS, + }; + s32 cpu = bpf_get_smp_processor_id(); + + if (cpu < 0 || cpu >= MAX_CPUS) + return MAX_DOMS; + + lctx.dom_rr_base = ++(pcpu_ctx[cpu].dom_rr_cur); + + bpf_loop(nr_doms, pick_task_domain_loopfn, &lctx, 0); + task_ctx->dom_mask = lctx.dom_mask; + + return lctx.dom_id; +} + +static void task_set_domain(struct task_ctx *task_ctx, struct task_struct *p, + const struct cpumask *cpumask) +{ + u32 dom_id = 0; + + if (nr_doms > 1) + dom_id = pick_task_domain(task_ctx, p, cpumask); + + task_set_dq(task_ctx, p, dom_id); +} + +void BPF_STRUCT_OPS(atropos_set_cpumask, struct task_struct *p, + const struct cpumask *cpumask) +{ + pid_t pid = p->pid; + struct task_ctx *task_ctx = bpf_map_lookup_elem(&task_data, &pid); + if (!task_ctx) { + stat_add(ATROPOS_STAT_TASK_GET_ERR, 1); + return; + } + + task_set_domain(task_ctx, p, cpumask); +} + +s32 BPF_STRUCT_OPS(atropos_prep_enable, struct task_struct *p, + struct scx_enable_args *args) +{ + struct bpf_cpumask *cpumask; + struct task_ctx task_ctx, *map_value; + long ret; + pid_t pid; + + memset(&task_ctx, 0, sizeof(task_ctx)); + task_ctx.weight = p->scx.weight; 
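+ /* dom_id and the cpumask kptr are filled in once the map entry exists below */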
+ + pid = p->pid; + ret = bpf_map_update_elem(&task_data, &pid, &task_ctx, BPF_NOEXIST); + if (ret) { + stat_add(ATROPOS_STAT_TASK_GET_ERR, 1); + return ret; + } + + /* + * Read the entry from the map immediately so we can add the cpumask + * with bpf_kptr_xchg(). + */ + map_value = bpf_map_lookup_elem(&task_data, &pid); + if (!map_value) + /* Should never happen -- it was just inserted above. */ + return -EINVAL; + + cpumask = bpf_cpumask_create(); + if (!cpumask) { + bpf_map_delete_elem(&task_data, &pid); + return -ENOMEM; + } + + cpumask = bpf_kptr_xchg(&map_value->cpumask, cpumask); + if (cpumask) { + /* Should never happen as we just inserted it above. */ + bpf_cpumask_release(cpumask); + bpf_map_delete_elem(&task_data, &pid); + return -EINVAL; + } + + task_set_domain(map_value, p, p->cpus_ptr); + + return 0; +} + +void BPF_STRUCT_OPS(atropos_disable, struct task_struct *p) +{ + pid_t pid = p->pid; + long ret = bpf_map_delete_elem(&task_data, &pid); + if (ret) { + stat_add(ATROPOS_STAT_TASK_GET_ERR, 1); + return; + } +} + +struct initialize_domain_loop_ctx { + struct bpf_cpumask *cpumask; + u32 dom_id; +}; + +static int set_cpumask_bit(u32 idx, void *data) +{ + struct initialize_domain_loop_ctx *lctx = data; + u32 dom_id = lctx->dom_id; + u64 mask = 1LLU << (idx % 64); + const volatile __u64 *dptr; + + dptr = MEMBER_VPTR(dom_cpumasks, [dom_id][idx / 64]); + if (!dptr) { + scx_bpf_error("Failed to initialize cpu %u for dom %u", idx, dom_id); + return 1; + } + + if ((*dptr & mask)) + bpf_cpumask_set_cpu(idx, lctx->cpumask); + else + bpf_cpumask_clear_cpu(idx, lctx->cpumask); + + return 0; +} + +static int create_local_dsq(u32 idx, void *data) +{ + struct dom_cpumask entry, *v; + struct bpf_cpumask *cpumask; + struct initialize_domain_loop_ctx loop_ctx; + u32 dom_id = idx; + s64 ret; + + ret = scx_bpf_create_dsq(dom_id, -1); + if (ret < 0) { + scx_bpf_error("Failed to create dsq %u (%d)", dom_id, ret); + return 1; + } + + memset(&entry, 0, sizeof(entry)); + ret = bpf_map_update_elem(&dom_cpumask_map, &dom_id, &entry, 0); + if (ret) { + scx_bpf_error("Failed to add dom_cpumask entry %u (%d)", dom_id, ret); + return 1; + } + + v = bpf_map_lookup_elem(&dom_cpumask_map, &dom_id); + if (!v) { + /* Should never happen, we just inserted it above. 
*/ + scx_bpf_error("Failed to lookup dom element %u", dom_id); + return 1; + } + + cpumask = bpf_cpumask_create(); + if (!cpumask) { + scx_bpf_error("Failed to create BPF cpumask for domain %u", dom_id); + return 1; + } + + loop_ctx.cpumask = cpumask; + loop_ctx.dom_id = dom_id; + if (bpf_loop(nr_cpus, set_cpumask_bit, &loop_ctx, 0) != nr_cpus) { + scx_bpf_error("Failed to initialize cpumask for domain %u", dom_id); + bpf_cpumask_release(cpumask); + return 1; + } + + cpumask = bpf_kptr_xchg(&v->cpumask, cpumask); + if (cpumask) { + scx_bpf_error("Domain %u was already present", dom_id); + bpf_cpumask_release(cpumask); + return 1; + } + + return 0; +} + +int BPF_STRUCT_OPS_SLEEPABLE(atropos_init) +{ + u32 local_nr_doms = nr_doms; + + bpf_printk("atropos init"); + + if (switch_all) + scx_bpf_switch_all(); + + // BPF verifier gets cranky if we don't bound this like so + if (local_nr_doms > MAX_DOMS) + local_nr_doms = MAX_DOMS; + + bpf_loop(local_nr_doms, create_local_dsq, NULL, 0); + + for (u32 i = 0; i < nr_cpus; ++i) { + pcpu_ctx[i].dom_rr_cur = i; + } + + return 0; +} + +void BPF_STRUCT_OPS(atropos_exit, struct scx_exit_info *ei) +{ + bpf_probe_read_kernel_str(exit_msg, sizeof(exit_msg), ei->msg); + exit_type = ei->type; +} + +SEC(".struct_ops") +struct sched_ext_ops atropos = { + .select_cpu = (void *)atropos_select_cpu, + .enqueue = (void *)atropos_enqueue, + .dispatch = (void *)atropos_dispatch, + .set_cpumask = (void *)atropos_set_cpumask, + .prep_enable = (void *)atropos_prep_enable, + .disable = (void *)atropos_disable, + .init = (void *)atropos_init, + .exit = (void *)atropos_exit, + .flags = 0, + .name = "atropos", +}; diff --git a/tools/sched_ext/atropos/src/bpf/atropos.h b/tools/sched_ext/atropos/src/bpf/atropos.h new file mode 100644 index 000000000000..921210ec2a3c --- /dev/null +++ b/tools/sched_ext/atropos/src/bpf/atropos.h @@ -0,0 +1,44 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. + +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2. +#ifndef __ATROPOS_H +#define __ATROPOS_H + +#include +#ifndef __kptr_ref +#ifdef __KERNEL__ +#error "__kptr_ref not defined in the kernel" +#endif +#define __kptr_ref +#endif + +#define MAX_CPUS 512 +#define MAX_DOMS 64 /* limited to avoid complex bitmask ops */ +#define CACHELINE_SIZE 64 + +/* Statistics */ +enum stat_idx { + ATROPOS_STAT_TASK_GET_ERR, + ATROPOS_STAT_TASK_GET_ERR_ENABLE, + ATROPOS_STAT_CPUMASK_ERR, + ATROPOS_STAT_WAKE_SYNC, + ATROPOS_STAT_PREV_IDLE, + ATROPOS_STAT_PINNED, + ATROPOS_STAT_DIRECT_DISPATCH, + ATROPOS_STAT_DSQ_DISPATCH, + ATROPOS_STAT_GREEDY, + ATROPOS_STAT_LOAD_BALANCE, + ATROPOS_STAT_LAST_TASK, + ATROPOS_NR_STATS, +}; + +struct task_ctx { + unsigned long long dom_mask; /* the domains this task can run on */ + struct bpf_cpumask __kptr_ref *cpumask; + unsigned int dom_id; + unsigned int weight; + bool dispatch_local; +}; + +#endif /* __ATROPOS_H */ diff --git a/tools/sched_ext/atropos/src/main.rs b/tools/sched_ext/atropos/src/main.rs new file mode 100644 index 000000000000..b9ae312a562f --- /dev/null +++ b/tools/sched_ext/atropos/src/main.rs @@ -0,0 +1,648 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. + +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2. 
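+//
+// This file is the userspace half of Atropos: it loads the BPF skeleton,
+// periodically samples procfs to estimate per-task and per-domain load, and
+// publishes migration decisions to the BPF side through the lb_data map.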
+#![deny(clippy::all)] +use std::collections::BTreeMap; +use std::ffi::CStr; +use std::sync::atomic::AtomicBool; +use std::sync::atomic::Ordering; +use std::sync::Arc; + +use ::fb_procfs as procfs; +use anyhow::anyhow; +use anyhow::bail; +use anyhow::Context; +use bitvec::prelude::*; +use clap::Parser; + +mod util; + +oss_shim!(); + +/// Atropos is a multi-domain BPF / userspace hybrid scheduler where the BPF +/// part does simple round robin in each domain and the userspace part +/// calculates the load factor of each domain and tells the BPF part how to load +/// balance the domains. + +/// This scheduler demonstrates dividing scheduling logic between BPF and +/// userspace and using rust to build the userspace part. An earlier variant of +/// this scheduler was used to balance across six domains, each representing a +/// chiplet in a six-chiplet AMD processor, and could match the performance of +/// production setup using CFS. +#[derive(Debug, Parser)] +struct Opt { + /// Set the log level for more or less verbose output. --log_level=debug + /// will output libbpf verbose details + #[clap(short, long, default_value = "info")] + log_level: String, + /// Set the cpumask for a domain, provide multiple --cpumasks, one for each + /// domain. E.g. --cpumasks 0xff_00ff --cpumasks 0xff00 will create two + /// domains with the corresponding CPUs belonging to each domain. Each CPU + /// must belong to precisely one domain. + #[clap(short, long, required = true, min_values = 1)] + cpumasks: Vec, + /// Switch all tasks to sched_ext. If not specified, only tasks which + /// have their scheduling policy set to SCHED_EXT using + /// sched_setscheduler(2) are switched. + #[clap(short, long, default_value = "false")] + all: bool, + /// Enable load balancing. Periodically userspace will calculate the load + /// factor of each domain and instruct BPF which processes to move. + #[clap(short, long, default_value = "true")] + load_balance: bool, + /// Enable greedy task stealing. When a domain is idle, a cpu will attempt + /// to steal tasks from a domain with at least greedy_threshold tasks + /// enqueued. These tasks aren't permanently stolen from the domain. 
+ #[clap(short, long)] + greedy_threshold: Option, +} + +type CpusetDqPair = (Vec>, Vec); + +// Returns Vec of cpuset for each dq and a vec of dq for each cpu +fn parse_cpusets(cpumasks: &[String]) -> anyhow::Result { + if cpumasks.len() > atropos_sys::MAX_DOMS as usize { + bail!( + "Number of requested DSQs ({}) is greater than MAX_DOMS ({})", + cpumasks.len(), + atropos_sys::MAX_DOMS + ); + } + let num_cpus = libbpf_rs::num_possible_cpus()?; + if num_cpus > atropos_sys::MAX_CPUS as usize { + bail!( + "num_cpus ({}) is greater than MAX_CPUS ({})", + num_cpus, + atropos_sys::MAX_CPUS, + ); + } + let mut cpus = vec![-1i32; num_cpus]; + let mut cpusets = vec![bitvec![u64, Lsb0; 0; atropos_sys::MAX_CPUS as usize]; cpumasks.len()]; + for (dq, cpumask) in cpumasks.iter().enumerate() { + let hex_str = { + let mut tmp_str = cpumask + .strip_prefix("0x") + .unwrap_or(cpumask) + .replace('_', ""); + if tmp_str.len() % 2 != 0 { + tmp_str = "0".to_string() + &tmp_str; + } + tmp_str + }; + let byte_vec = hex::decode(&hex_str) + .with_context(|| format!("Failed to parse cpumask: {}", cpumask))?; + + for (index, &val) in byte_vec.iter().rev().enumerate() { + let mut v = val; + while v != 0 { + let lsb = v.trailing_zeros() as usize; + v &= !(1 << lsb); + let cpu = index * 8 + lsb; + if cpu > num_cpus { + bail!( + concat!( + "Found cpu ({}) in cpumask ({}) which is larger", + " than the number of cpus on the machine ({})" + ), + cpu, + cpumask, + num_cpus + ); + } + if cpus[cpu] != -1 { + bail!( + "Found cpu ({}) with dq ({}) but also in cpumask ({})", + cpu, + cpus[cpu], + cpumask + ); + } + cpus[cpu] = dq as i32; + cpusets[dq].set(cpu, true); + } + } + cpusets[dq].set_uninitialized(false); + } + + for (cpu, &dq) in cpus.iter().enumerate() { + if dq < 0 { + bail!( + "Cpu {} not assigned to any dq. 
Make sure it is covered by some --cpumasks argument.", + cpu + ); + } + } + + Ok((cpusets, cpus)) +} + +struct Sample { + total_cpu: procfs::CpuStat, +} + +fn get_cpustats(reader: &mut procfs::ProcReader) -> anyhow::Result { + let stat = reader.read_stat().context("Failed to read procfs")?; + Ok(Sample { + total_cpu: stat + .total_cpu + .ok_or_else(|| anyhow!("Could not read total cpu stat in proc"))?, + }) +} + +fn calculate_cpu_busy(prev: &procfs::CpuStat, next: &procfs::CpuStat) -> anyhow::Result { + match (prev, next) { + ( + procfs::CpuStat { + user_usec: Some(prev_user), + nice_usec: Some(prev_nice), + system_usec: Some(prev_system), + idle_usec: Some(prev_idle), + iowait_usec: Some(prev_iowait), + irq_usec: Some(prev_irq), + softirq_usec: Some(prev_softirq), + stolen_usec: Some(prev_stolen), + guest_usec: _, + guest_nice_usec: _, + }, + procfs::CpuStat { + user_usec: Some(curr_user), + nice_usec: Some(curr_nice), + system_usec: Some(curr_system), + idle_usec: Some(curr_idle), + iowait_usec: Some(curr_iowait), + irq_usec: Some(curr_irq), + softirq_usec: Some(curr_softirq), + stolen_usec: Some(curr_stolen), + guest_usec: _, + guest_nice_usec: _, + }, + ) => { + let idle_usec = curr_idle - prev_idle; + let iowait_usec = curr_iowait - prev_iowait; + let user_usec = curr_user - prev_user; + let system_usec = curr_system - prev_system; + let nice_usec = curr_nice - prev_nice; + let irq_usec = curr_irq - prev_irq; + let softirq_usec = curr_softirq - prev_softirq; + let stolen_usec = curr_stolen - prev_stolen; + + let busy_usec = + user_usec + system_usec + nice_usec + irq_usec + softirq_usec + stolen_usec; + let total_usec = idle_usec + busy_usec + iowait_usec; + Ok(busy_usec as f64 / total_usec as f64) + } + _ => { + bail!("Some procfs stats are not populated!"); + } + } +} + +fn calculate_pid_busy( + prev: &procfs::PidStat, + next: &procfs::PidStat, + dur: std::time::Duration, +) -> anyhow::Result { + match ( + (prev.user_usecs, prev.system_usecs), + (next.user_usecs, prev.system_usecs), + ) { + ((Some(prev_user), Some(prev_system)), (Some(next_user), Some(next_system))) => { + if (next_user >= prev_user) && (next_system >= prev_system) { + let busy_usec = next_user + next_system - prev_user - prev_system; + Ok(busy_usec as f64 / dur.as_micros() as f64) + } else { + bail!("Pid usage values look wrong"); + } + } + _ => { + bail!("Some procfs stats are not populated!"); + } + } +} + +struct PidInfo { + pub pid: i32, + pub dom: u32, + pub dom_mask: u64, +} + +struct LoadInfo { + pids_by_milliload: BTreeMap, + pid_stats: BTreeMap, + global_load_sum: f64, + dom_load: Vec, +} + +// We calculate the load for each task and then each dom by enumerating all the +// tasks in task_data and calculating their CPU util from procfs. + +// Given procfs reader, task data map, and pidstat from previous calculation, +// return: +// * a sorted map from milliload -> pid_data, +// * a map from pid -> pidstat +// * a vec of per-dom looads +fn calculate_load( + proc_reader: &procfs::ProcReader, + task_data: &libbpf_rs::Map, + interval: std::time::Duration, + prev_pid_stat: &BTreeMap, + nr_doms: usize, +) -> anyhow::Result { + let mut ret = LoadInfo { + pids_by_milliload: BTreeMap::new(), + pid_stats: BTreeMap::new(), + global_load_sum: 0f64, + dom_load: vec![0f64; nr_doms], + }; + for key in task_data.keys() { + if let Some(task_ctx_vec) = task_data + .lookup(&key, libbpf_rs::MapFlags::ANY) + .context("Failed to lookup task_data")? 
+ { + let task_ctx = + unsafe { &*(task_ctx_vec.as_slice().as_ptr() as *const atropos_sys::task_ctx) }; + let pid = i32::from_ne_bytes( + key.as_slice() + .try_into() + .context("Invalid key length in task_data map")?, + ); + match proc_reader.read_tid_stat(pid as u32) { + Ok(stat) => { + ret.pid_stats.insert(pid, stat); + } + Err(procfs::Error::IoError(_, ref e)) + if e.raw_os_error() + .map_or(false, |ec| ec == 2 || ec == 3 /* ENOENT or ESRCH */) => + { + continue; + } + Err(e) => { + bail!(e); + } + } + let pid_load = match (prev_pid_stat.get(&pid), ret.pid_stats.get(&pid)) { + (Some(prev_pid_stat), Some(next_pid_stat)) => { + calculate_pid_busy(prev_pid_stat, next_pid_stat, interval)? + } + // If we don't have any utilization #s for the process, just skip it + _ => { + continue; + } + } * task_ctx.weight as f64; + if !pid_load.is_finite() || pid_load <= 0.0 { + continue; + } + ret.global_load_sum += pid_load; + ret.dom_load[task_ctx.dom_id as usize] += pid_load; + // Only record pids that are eligible for load balancing + if task_ctx.dom_mask == (1u64 << task_ctx.dom_id) { + continue; + } + ret.pids_by_milliload.insert( + (pid_load * 1000.0) as u64, + PidInfo { + pid, + dom: task_ctx.dom_id, + dom_mask: task_ctx.dom_mask, + }, + ); + } + } + Ok(ret) +} + +#[derive(Copy, Clone, Default)] +struct DomLoadBalanceInfo { + load_to_pull: f64, + load_to_give: f64, +} + +#[derive(Default)] +struct LoadBalanceInfo { + doms: Vec, + doms_with_load_to_pull: BTreeMap, + doms_with_load_to_give: BTreeMap, +} + +// To balance dom loads we identify doms with lower and higher load than average +fn calculate_dom_load_balance(global_load_avg: f64, dom_load: &[f64]) -> LoadBalanceInfo { + let mut ret = LoadBalanceInfo::default(); + ret.doms.resize(dom_load.len(), Default::default()); + + const LOAD_IMBAL_HIGH_PCT: f64 = 0.10; + const LOAD_IMBAL_MAX_ADJ_PCT: f64 = 0.10; + let high = global_load_avg * LOAD_IMBAL_HIGH_PCT; + let adj_max = global_load_avg * LOAD_IMBAL_MAX_ADJ_PCT; + + for (dom, dom_load) in dom_load.iter().enumerate() { + let mut imbal = dom_load - global_load_avg; + + let mut dom_load_to_pull = 0f64; + let mut dom_load_to_give = 0f64; + if imbal >= 0f64 { + dom_load_to_give = imbal; + } else { + imbal = -imbal; + if imbal > high { + dom_load_to_pull = f64::min(imbal, adj_max); + } + } + ret.doms[dom].load_to_pull = dom_load_to_pull; + ret.doms[dom].load_to_give = dom_load_to_give; + if dom_load_to_pull > 0f64 { + ret.doms_with_load_to_pull + .insert(dom as u32, dom_load_to_pull); + } + if dom_load_to_give > 0f64 { + ret.doms_with_load_to_give + .insert(dom as u32, dom_load_to_give); + } + } + ret +} + +fn clear_map(map: &mut libbpf_rs::Map) { + // XXX: libbpf_rs has some design flaw that make it impossible to + // delete while iterating despite it being safe so we alias it here + let deleter: &mut libbpf_rs::Map = unsafe { &mut *(map as *mut _) }; + for key in map.keys() { + let _ = deleter.delete(&key); + } +} + +// Actually execute the load balancing. Concretely this writes pid -> dom +// entries into the lb_data map for bpf side to consume. +// +// The logic here is simple, greedily balance the heaviest load processes until +// either we have no doms with load to give or no doms with load to pull. 
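+//
+// Illustrative numbers only: with a global load average of 100 and dom loads
+// of [130, 70], dom 0 has 30 units of load to give while dom 1 may pull at
+// most 10 (pulls are capped at 10% of the average), so the heaviest eligible
+// tasks on dom 0 are reassigned to dom 1 until either budget runs out.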
+fn load_balance( + global_load_avg: f64, + lb_data: &mut libbpf_rs::Map, + pids_by_milliload: &BTreeMap, + mut doms_with_load_to_pull: BTreeMap, + mut doms_with_load_to_give: BTreeMap, +) -> anyhow::Result<()> { + clear_map(lb_data); + const LOAD_IMBAL_MIN_ADJ_PCT: f64 = 0.01; + let adj_min = global_load_avg * LOAD_IMBAL_MIN_ADJ_PCT; + for (pid_milliload, pidinfo) in pids_by_milliload.iter().rev() { + if doms_with_load_to_give.is_empty() || doms_with_load_to_pull.is_empty() { + break; + } + + let pid_load = *pid_milliload as f64 / 1000f64; + let mut remove_to_give = None; + let mut remove_to_pull = None; + if let Some(dom_imbal) = doms_with_load_to_give.get_mut(&pidinfo.dom) { + if *dom_imbal < pid_load { + continue; + } + + for (new_dom, new_dom_imbal) in doms_with_load_to_pull.iter_mut() { + if (pidinfo.dom_mask & (1 << new_dom)) == 0 || *new_dom_imbal < pid_load { + continue; + } + + *dom_imbal -= pid_load; + if *dom_imbal <= adj_min { + remove_to_give = Some(pidinfo.dom); + } + *new_dom_imbal -= pid_load; + if *new_dom_imbal <= adj_min { + remove_to_pull = Some(pidinfo.dom); + } + + lb_data + .update( + &(pidinfo.pid as libc::pid_t).to_ne_bytes(), + &new_dom.to_ne_bytes(), + libbpf_rs::MapFlags::NO_EXIST, + ) + .context("Failed to update lb_data")?; + break; + } + } + + remove_to_give.map(|dom| doms_with_load_to_give.remove(&dom)); + remove_to_pull.map(|dom| doms_with_load_to_pull.remove(&dom)); + } + Ok(()) +} + +fn print_stats( + logger: slog::Logger, + stats_map: &mut libbpf_rs::Map, + nr_doms: usize, + nr_cpus: usize, + cpu_busy: f64, + global_load_avg: f64, + dom_load: &[f64], + dom_lb_info: &[DomLoadBalanceInfo], +) -> anyhow::Result<()> { + let stats = { + let mut stats: Vec = Vec::new(); + let zero_vec = vec![vec![0u8; stats_map.value_size() as usize]; nr_cpus]; + for stat in 0..atropos_sys::stat_idx_ATROPOS_NR_STATS { + let cpu_stat_vec = stats_map + .lookup_percpu(&(stat as u32).to_ne_bytes(), libbpf_rs::MapFlags::ANY) + .with_context(|| format!("Failed to lookup stat {}", stat))? 
+ .expect("per-cpu stat should exist"); + let sum = cpu_stat_vec + .iter() + .map(|val| { + u64::from_ne_bytes( + val.as_slice() + .try_into() + .expect("Invalid value length in stat map"), + ) + }) + .sum(); + stats_map + .update_percpu( + &(stat as u32).to_ne_bytes(), + &zero_vec, + libbpf_rs::MapFlags::ANY, + ) + .context("Failed to zero stat")?; + stats.push(sum); + } + stats + }; + let mut total = 0; + total += stats[atropos_sys::stat_idx_ATROPOS_STAT_WAKE_SYNC as usize]; + total += stats[atropos_sys::stat_idx_ATROPOS_STAT_PREV_IDLE as usize]; + total += stats[atropos_sys::stat_idx_ATROPOS_STAT_PINNED as usize]; + total += stats[atropos_sys::stat_idx_ATROPOS_STAT_DIRECT_DISPATCH as usize]; + total += stats[atropos_sys::stat_idx_ATROPOS_STAT_DSQ_DISPATCH as usize]; + total += stats[atropos_sys::stat_idx_ATROPOS_STAT_GREEDY as usize]; + total += stats[atropos_sys::stat_idx_ATROPOS_STAT_LAST_TASK as usize]; + slog::info!(logger, "cpu={:5.1}", cpu_busy * 100.0); + slog::info!( + logger, + "task_get_errs: {}, cpumask_errs: {}", + stats[atropos_sys::stat_idx_ATROPOS_STAT_TASK_GET_ERR as usize], + stats[atropos_sys::stat_idx_ATROPOS_STAT_CPUMASK_ERR as usize], + ); + slog::info!( + logger, + "tot={:6} wake_sync={:4.1},prev_idle={:4.1},pinned={:4.1},direct={:4.1},dq={:4.1},greedy={:4.1}", + total, + stats[atropos_sys::stat_idx_ATROPOS_STAT_WAKE_SYNC as usize] as f64 / total as f64 * 100f64, + stats[atropos_sys::stat_idx_ATROPOS_STAT_PREV_IDLE as usize] as f64 / total as f64 * 100f64, + stats[atropos_sys::stat_idx_ATROPOS_STAT_PINNED as usize] as f64 / total as f64 * 100f64, + stats[atropos_sys::stat_idx_ATROPOS_STAT_DIRECT_DISPATCH as usize] as f64 / total as f64 + * 100f64, + stats[atropos_sys::stat_idx_ATROPOS_STAT_DSQ_DISPATCH as usize] as f64 / total as f64 + * 100f64, + stats[atropos_sys::stat_idx_ATROPOS_STAT_GREEDY as usize] as f64 / total as f64 * 100f64, + ); + + slog::info!( + logger, + "load_avg:{:.1}, load_balances={}", + global_load_avg, + stats[atropos_sys::stat_idx_ATROPOS_STAT_LOAD_BALANCE as usize] + ); + for i in 0..nr_doms { + slog::info!(logger, "DOM[{:02}]", i); + slog::info!( + logger, + " load={:.1} to_pull={:.1},to_give={:.1}", + dom_load[i], + dom_lb_info[i].load_to_pull, + dom_lb_info[i].load_to_give, + ); + } + Ok(()) +} + +pub fn run( + logger: slog::Logger, + debug: bool, + cpumasks: Vec, + switch_all: bool, + balance_load: bool, + greedy_threshold: Option, +) -> anyhow::Result<()> { + slog::info!(logger, "Atropos Scheduler Initialized"); + let mut skel_builder = AtroposSkelBuilder::default(); + skel_builder.obj_builder.debug(debug); + let mut skel = skel_builder.open().context("Failed to open BPF program")?; + + let (cpusets, cpus) = parse_cpusets(&cpumasks)?; + let nr_doms = cpusets.len(); + let nr_cpus = libbpf_rs::num_possible_cpus()?; + skel.rodata().nr_doms = nr_doms as u32; + skel.rodata().nr_cpus = nr_cpus as u32; + + for (cpu, dom) in cpus.iter().enumerate() { + skel.rodata().cpu_dom_id_map[cpu] = *dom as u32; + } + + for (dom, cpuset) in cpusets.iter().enumerate() { + let raw_cpuset_slice = cpuset.as_raw_slice(); + let dom_cpumask_slice = &mut skel.rodata().dom_cpumasks[dom]; + let (left, _) = dom_cpumask_slice.split_at_mut(raw_cpuset_slice.len()); + left.clone_from_slice(cpuset.as_raw_slice()); + slog::info!(logger, "dom {} cpumask {:X?}", dom, dom_cpumask_slice); + } + + skel.rodata().switch_all = switch_all; + + if let Some(greedy) = greedy_threshold { + skel.rodata().greedy_threshold = greedy; + } + + let mut skel = skel.load().context("Failed to 
load BPF program")?; + skel.attach().context("Failed to attach BPF program")?; + + let _structops = skel + .maps_mut() + .atropos() + .attach_struct_ops() + .context("Failed to attach atropos struct ops")?; + slog::info!(logger, "Atropos Scheduler Attached"); + let shutdown = Arc::new(AtomicBool::new(false)); + let shutdown_clone = shutdown.clone(); + ctrlc::set_handler(move || { + shutdown_clone.store(true, Ordering::Relaxed); + }) + .context("Error setting Ctrl-C handler")?; + + let mut proc_reader = procfs::ProcReader::new(); + let mut prev_sample = get_cpustats(&mut proc_reader)?; + let mut prev_pid_stat: BTreeMap = BTreeMap::new(); + while !shutdown.load(Ordering::Relaxed) + && unsafe { std::ptr::read_volatile(&skel.bss().exit_type as *const _) } == 0 + { + let interval = std::time::Duration::from_secs(1); + std::thread::sleep(interval); + let now = std::time::SystemTime::now(); + let next_sample = get_cpustats(&mut proc_reader)?; + let cpu_busy = calculate_cpu_busy(&prev_sample.total_cpu, &next_sample.total_cpu)?; + prev_sample = next_sample; + let load_info = calculate_load( + &proc_reader, + skel.maps().task_data(), + interval, + &prev_pid_stat, + nr_doms, + )?; + prev_pid_stat = load_info.pid_stats; + + let global_load_avg = load_info.global_load_sum / nr_doms as f64; + let mut lb_info = calculate_dom_load_balance(global_load_avg, &load_info.dom_load); + + let doms_with_load_to_pull = std::mem::take(&mut lb_info.doms_with_load_to_pull); + let doms_with_load_to_give = std::mem::take(&mut lb_info.doms_with_load_to_give); + if balance_load { + load_balance( + global_load_avg, + skel.maps_mut().lb_data(), + &load_info.pids_by_milliload, + doms_with_load_to_pull, + doms_with_load_to_give, + )?; + slog::info!( + logger, + "Load balancing took {:?}", + now.elapsed().context("Getting a duration failed")? + ); + } + print_stats( + logger.clone(), + skel.maps_mut().stats(), + nr_doms, + nr_cpus, + cpu_busy, + global_load_avg, + &load_info.dom_load, + &lb_info.doms, + )?; + } + /* Report msg if EXT_OPS_EXIT_ERROR */ + if skel.bss().exit_type == 2 { + let exit_msg_cstr = unsafe { CStr::from_ptr(skel.bss().exit_msg.as_ptr() as *const _) }; + let exit_msg = exit_msg_cstr + .to_str() + .context("Failed to convert exit msg to string")?; + eprintln!("exit_type={} msg={}", skel.bss().exit_type, exit_msg); + } + Ok(()) +} + +fn main() -> anyhow::Result<()> { + let opts = Opt::parse(); + let logger = setup_logger(&opts.log_level)?; + let debug = opts.log_level == "debug"; + + run( + logger, + debug, + opts.cpumasks, + opts.all, + opts.load_balance, + opts.greedy_threshold, + ) +} diff --git a/tools/sched_ext/atropos/src/oss/atropos_sys.rs b/tools/sched_ext/atropos/src/oss/atropos_sys.rs new file mode 100644 index 000000000000..bbeaf856d40e --- /dev/null +++ b/tools/sched_ext/atropos/src/oss/atropos_sys.rs @@ -0,0 +1,10 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. + +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2. +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(non_snake_case)] +#![allow(dead_code)] + +include!(concat!(env!("OUT_DIR"), "/atropos-sys.rs")); diff --git a/tools/sched_ext/atropos/src/oss/mod.rs b/tools/sched_ext/atropos/src/oss/mod.rs new file mode 100644 index 000000000000..5afcf35f777d --- /dev/null +++ b/tools/sched_ext/atropos/src/oss/mod.rs @@ -0,0 +1,29 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. 
+ +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2. +#[path = "../bpf/.output/atropos.skel.rs"] +mod atropos; +use std::str::FromStr; + +use anyhow::bail; +pub use atropos::*; +use slog::o; +use slog::Drain; +use slog::Level; + +pub mod atropos_sys; + +pub fn setup_logger(level: &str) -> anyhow::Result { + let log_level = match Level::from_str(level) { + Ok(l) => l, + Err(()) => bail!("Failed to parse \"{}\" as a log level", level), + }; + let decorator = slog_term::TermDecorator::new().build(); + let drain = slog_term::FullFormat::new(decorator).build().fuse(); + let drain = slog_async::Async::new(drain) + .build() + .filter_level(log_level) + .fuse(); + Ok(slog::Logger::root(drain, o!())) +} diff --git a/tools/sched_ext/atropos/src/util.rs b/tools/sched_ext/atropos/src/util.rs new file mode 100644 index 000000000000..eae414c0919a --- /dev/null +++ b/tools/sched_ext/atropos/src/util.rs @@ -0,0 +1,24 @@ +// Copyright (c) Meta Platforms, Inc. and affiliates. + +// This software may be used and distributed according to the terms of the +// GNU General Public License version 2. + +// Shim between facebook types and open source types. +// +// The type interfaces and module hierarchy should be identical on +// both "branches". And since we glob import, all the submodules in +// this crate will inherit our name bindings and can use generic paths, +// eg `crate::logging::setup(..)`. +#[macro_export] +macro_rules! oss_shim { + () => { + #[cfg(fbcode_build)] + mod facebook; + #[cfg(fbcode_build)] + use facebook::*; + #[cfg(not(fbcode_build))] + mod oss; + #[cfg(not(fbcode_build))] + use oss::*; + }; +}
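A rough usage note, not part of the patch itself: `make atropos` in
tools/sched_ext drives the cargo build, and the resulting binary (named
atropos-bin per Cargo.toml, normally under atropos/target/release/) takes one
--cpumasks argument per domain, e.g.
./atropos/target/release/atropos-bin --cpumasks 0x00ff --cpumasks 0xff00 --all
on a hypothetical 16-CPU machine would create two 8-CPU domains and switch all
tasks to sched_ext, while --greedy-threshold additionally enables greedy task
stealing from sufficiently busy remote domains.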