@@ -12,6 +12,15 @@
static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
+static bool check_builtin_idle_enabled(void)
+{
+	if (static_branch_likely(&scx_builtin_idle_enabled))
+		return true;
+
+	scx_ops_error("built-in idle tracking is disabled");
+	return false;
+}
+
#ifdef CONFIG_SMP
#ifdef CONFIG_CPUMASK_OFFSTACK
#define CL_ALIGNED_IF_ONSTACK
@@ -508,10 +517,8 @@ __bpf_kfunc_start_defs();
__bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
u64 wake_flags, bool *is_idle)
{
-	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
-		scx_ops_error("built-in idle tracking is disabled");
+	if (!check_builtin_idle_enabled())
 		goto prev_cpu;
-	}
 
if (!scx_kf_allowed(SCX_KF_SELECT_CPU))
goto prev_cpu;
@@ -533,10 +540,8 @@ __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
*/
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
{
-	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
-		scx_ops_error("built-in idle tracking is disabled");
+	if (!check_builtin_idle_enabled())
 		return cpu_none_mask;
-	}
 
#ifdef CONFIG_SMP
return idle_masks.cpu;
@@ -554,10 +559,8 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
*/
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
{
-	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
-		scx_ops_error("built-in idle tracking is disabled");
+	if (!check_builtin_idle_enabled())
 		return cpu_none_mask;
-	}
 
#ifdef CONFIG_SMP
if (sched_smt_active())
@@ -595,10 +598,8 @@ __bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
*/
__bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
{
-	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
-		scx_ops_error("built-in idle tracking is disabled");
+	if (!check_builtin_idle_enabled())
 		return false;
-	}
 
if (ops_cpu_valid(cpu, NULL))
return test_and_clear_cpu_idle(cpu);
@@ -628,10 +629,8 @@ __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
__bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
u64 flags)
{
-	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
-		scx_ops_error("built-in idle tracking is disabled");
+	if (!check_builtin_idle_enabled())
 		return -EBUSY;
-	}
 
return scx_pick_idle_cpu(cpus_allowed, flags);
}
Minor refactoring to add a helper function for checking if the built-in
idle CPU selection policy is enabled.

Signed-off-by: Andrea Righi <arighi@nvidia.com>
---
 kernel/sched/ext_idle.c | 29 ++++++++++++++---------------
 1 file changed, 14 insertions(+), 15 deletions(-)
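
For illustration only, not part of this patch: a minimal sketch of how a
future kfunc in kernel/sched/ext_idle.c could lean on the new helper instead
of open-coding the static branch check. The kfunc name and its return
convention below are hypothetical; only check_builtin_idle_enabled(),
ops_cpu_valid() and test_and_clear_cpu_idle() come from the existing code.

/*
 * Hypothetical example: claim @cpu if it is idle, relying on
 * check_builtin_idle_enabled() to emit the common error and bail out
 * when built-in idle tracking is disabled.
 */
__bpf_kfunc s32 scx_bpf_example_claim_idle_cpu(s32 cpu)
{
	if (!check_builtin_idle_enabled())
		return -EBUSY;

	if (!ops_cpu_valid(cpu, NULL))
		return -EINVAL;

	/* Returns @cpu on success, -EBUSY if it was not idle. */
	return test_and_clear_cpu_idle(cpu) ? cpu : -EBUSY;
}

Centralizing the check in one helper keeps the "built-in idle tracking is
disabled" diagnostic in a single place, so every caller reports the same
error when the policy is off.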