diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
new file mode 100644
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_EXT_H
+#define _LINUX_SCHED_EXT_H
+
+#ifdef CONFIG_SCHED_CLASS_EXT
+#error "NOT IMPLEMENTED YET"
+#else /* !CONFIG_SCHED_CLASS_EXT */
+
+static inline void sched_ext_free(struct task_struct *p) {}
+
+#endif /* CONFIG_SCHED_CLASS_EXT */
+#endif /* _LINUX_SCHED_EXT_H */
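
The header above is the usual kconfig stub pattern: with CONFIG_SCHED_CLASS_EXT off, sched_ext_free() is an empty static inline that the compiler drops entirely, so call sites such as __put_task_struct() below stay free of #ifdefs. A minimal standalone sketch of the same pattern (the names are made up for the demo, not taken from the patch):

/* stub-header pattern demo; build with and without -DCONFIG_FEATURE */
#include <stdio.h>

#ifdef CONFIG_FEATURE
static inline void feature_free(int *obj) { printf("free %d\n", *obj); }
#else /* !CONFIG_FEATURE */
static inline void feature_free(int *obj) {} /* compiles to nothing */
#endif

int main(void)
{
	int obj = 42;
	feature_free(&obj); /* call site stays #ifdef-free either way */
	return 0;
}
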
diff --git a/kernel/fork.c b/kernel/fork.c
@@ -23,6 +23,7 @@
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
+#include <linux/sched/ext.h>
#include <linux/seq_file.h>
#include <linux/rtmutex.h>
#include <linux/init.h>
@@ -833,6 +834,7 @@ void __put_task_struct(struct task_struct *tsk)
WARN_ON(refcount_read(&tsk->usage));
WARN_ON(tsk == current);
+ sched_ext_free(tsk);
io_uring_free(tsk);
cgroup_free(tsk);
task_numa_free(tsk, true);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
@@ -4554,6 +4554,8 @@ late_initcall(sched_core_sysctl_init);
*/
int sched_fork(unsigned long clone_flags, struct task_struct *p)
{
+ int ret;
+
__sched_fork(clone_flags, p);
/*
* We mark the process as NEW here. This guarantees that
@@ -4590,12 +4592,16 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
p->sched_reset_on_fork = 0;
}
- if (dl_prio(p->prio))
- return -EAGAIN;
- else if (rt_prio(p->prio))
+ scx_pre_fork(p);
+
+ if (dl_prio(p->prio)) {
+ ret = -EAGAIN;
+ goto out_cancel;
+ } else if (rt_prio(p->prio)) {
p->sched_class = &rt_sched_class;
- else
+ } else {
p->sched_class = &fair_sched_class;
+ }
init_entity_runnable_average(&p->se);
@@ -4613,6 +4619,10 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
RB_CLEAR_NODE(&p->pushable_dl_tasks);
#endif
return 0;
+
+out_cancel:
+ scx_cancel_fork(p);
+ return ret;
}
int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
@@ -4643,16 +4653,18 @@ int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
p->sched_class->task_fork(p);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
- return 0;
+ return scx_fork(p);
}
void sched_cancel_fork(struct task_struct *p)
{
+ scx_cancel_fork(p);
}
void sched_post_fork(struct task_struct *p)
{
uclamp_post_fork(p);
+ scx_post_fork(p);
}
unsigned long to_ratio(u64 period, u64 runtime)
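
Taken together, the hooks added above form a prepare/commit/cancel protocol around fork: scx_pre_fork() always runs first, and every pre_fork is eventually paired with either scx_post_fork() on success or scx_cancel_fork() on failure, whether via the -EAGAIN path in sched_fork() or via sched_cancel_fork(). A condensed userspace mock of that pairing (hook bodies and error checks are stand-ins, and the sched_cancel_fork() leg is compressed into one function; this is not kernel code):

#include <errno.h>
#include <stdbool.h>

struct task { bool dl; };                     /* stand-in for task_struct */

static void scx_pre_fork(struct task *p)    {}
static int  scx_fork(struct task *p)        { return 0; }
static void scx_post_fork(struct task *p)   {}
static void scx_cancel_fork(struct task *p) {}

static int fork_path(struct task *p)
{
	scx_pre_fork(p);            /* sched_fork(): unconditional       */

	if (p->dl) {                /* the dl_prio() rejection above     */
		scx_cancel_fork(p); /* out_cancel: pairs with pre_fork   */
		return -EAGAIN;
	}
	if (scx_fork(p)) {          /* tail of sched_cgroup_fork()       */
		scx_cancel_fork(p); /* via sched_cancel_fork()           */
		return -EINVAL;
	}
	scx_post_fork(p);           /* sched_post_fork(): commit         */
	return 0;
}

int main(void) { struct task t = { .dl = false }; return fork_path(&t); }
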
@@ -5800,10 +5812,13 @@ static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
* We can terminate the balance pass as soon as we know there is
* a runnable task of @class priority or higher.
*/
- for_class_range(class, prev->sched_class, &idle_sched_class) {
+ for_balance_class_range(class, prev->sched_class, &idle_sched_class) {
if (class->balance(rq, prev, rf))
break;
}
+#else
+ /* SCX needs the balance call even on UP, so call it explicitly */
+ balance_scx_on_up(rq, prev, rf);
#endif
put_prev_task(rq, prev);
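
Two things happen here. On SMP builds the balance loop now iterates with for_balance_class_range(), which the !SCX stub in kernel/sched/ext.h below aliases straight back to for_class_range(), so the generated code is unchanged until SCX is actually implemented. On UP builds the loop does not exist at all, hence the explicit call in the new #else arm. A userspace sketch of that UP/SMP split (illustrative only, toy signatures):

#include <stdio.h>

/* #define CONFIG_SMP 1 */          /* flip to mimic an SMP build */

static int balance_scx(void)        /* stub: reports "no work found" */
{
	printf("balance_scx\n");
	return 0;
}

#ifndef CONFIG_SMP
/* UP: there is no balance loop, so the call must be made explicitly */
static void balance_scx_on_up(void) { balance_scx(); }
#endif

static void put_prev_task_balance_sketch(void)
{
#ifdef CONFIG_SMP
	balance_scx();              /* reached via the class-range loop */
#else
	balance_scx_on_up();        /* the explicit UP path added above */
#endif
}

int main(void) { put_prev_task_balance_sketch(); return 0; }
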
@@ -5818,6 +5833,9 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
const struct sched_class *class;
struct task_struct *p;
+ if (scx_enabled())
+ goto restart;
+
/*
* Optimization: we know that if all tasks are in the fair class we can
* call that function directly, but only if the @prev task wasn't of a
@@ -5843,7 +5861,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
restart:
put_prev_task_balance(rq, prev, rf);
- for_each_class(class) {
+ for_each_active_class(class) {
p = class->pick_next_task(rq);
if (p)
return p;
@@ -5876,7 +5894,7 @@ static inline struct task_struct *pick_task(struct rq *rq)
const struct sched_class *class;
struct task_struct *p;
- for_each_class(class) {
+ for_each_active_class(class) {
p = class->pick_task(rq);
if (p)
return p;
@@ -9810,6 +9828,7 @@ void __init sched_init(void)
balance_push_set(smp_processor_id(), false);
#endif
init_sched_fair_class();
+ init_sched_ext_class();
psi_init();
diff --git a/kernel/sched/ext.h b/kernel/sched/ext.h
new file mode 100644
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifdef CONFIG_SCHED_CLASS_EXT
+#error "NOT IMPLEMENTED YET"
+#else /* !CONFIG_SCHED_CLASS_EXT */
+
+#define scx_enabled() false
+
+static inline void scx_pre_fork(struct task_struct *p) {}
+static inline int scx_fork(struct task_struct *p) { return 0; }
+static inline void scx_post_fork(struct task_struct *p) {}
+static inline void scx_cancel_fork(struct task_struct *p) {}
+static inline int balance_scx(struct rq *rq, struct task_struct *prev,
+ struct rq_flags *rf) { return 0; }
+static inline void init_sched_ext_class(void) {}
+
+#define for_each_active_class for_each_class
+#define for_balance_class_range for_class_range
+
+#endif /* CONFIG_SCHED_CLASS_EXT */
+
+#ifndef CONFIG_SMP
+static inline void balance_scx_on_up(struct rq *rq, struct task_struct *prev,
+ struct rq_flags *rf)
+{
+ balance_scx(rq, prev, rf);
+}
+#endif
+
+#if defined(CONFIG_SCHED_CLASS_EXT) && defined(CONFIG_SMP)
+#error "NOT IMPLEMENTED YET"
+#else
+static inline void scx_update_idle(struct rq *rq, bool idle) {}
+#endif
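
With the class compiled out, for_each_active_class and for_balance_class_range collapse to the existing iterators, so every loop touched in core.c preprocesses to exactly what it was before this patch. A toy, compilable illustration of that aliasing trick (all names here are invented for the demo):

#include <stdio.h>

struct sched_class_toy { const char *name; };

static const struct sched_class_toy classes[] = {
	{ "stop" }, { "dl" }, { "rt" }, { "fair" }, { "idle" },
};

#define for_each_class_toy(c) \
	for ((c) = classes; (c) < classes + 5; (c)++)

/* with the extensible class off, the "active" iterator is the plain one */
#define for_each_active_class_toy for_each_class_toy

int main(void)
{
	const struct sched_class_toy *c;

	for_each_active_class_toy(c)    /* identical to for_each_class_toy */
		printf("%s\n", c->name);
	return 0;
}
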
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
@@ -428,11 +428,13 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
+ scx_update_idle(rq, false);
}
static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
update_idle_core(rq);
+ scx_update_idle(rq, true);
schedstat_inc(rq->sched_goidle);
}
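
These two calls bracket the idle task's tenure on a CPU: scx_update_idle(rq, true) fires when the CPU switches to the idle task, scx_update_idle(rq, false) when it leaves it, giving a later SCX implementation an edge-triggered view of per-CPU idleness (the stub above discards it for now). A toy edge-triggered tracker of the same shape (userspace, illustrative names):

#include <stdbool.h>
#include <stdio.h>

/* toy per-cpu idle flag, driven edge-triggered like scx_update_idle() */
static bool cpu_idle_toy[4];

static void scx_update_idle_toy(int cpu, bool idle)
{
	if (cpu_idle_toy[cpu] == idle)
		return;                 /* no edge, nothing to report */
	cpu_idle_toy[cpu] = idle;
	printf("cpu%d -> %s\n", cpu, idle ? "idle" : "busy");
}

int main(void)
{
	scx_update_idle_toy(0, true);   /* set_next_task_idle() path */
	scx_update_idle_toy(0, false);  /* put_prev_task_idle() path */
	return 0;
}
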
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
@@ -3252,4 +3252,6 @@ enum cpu_cftype_id {
extern struct cftype cpu_cftypes[CPU_CFTYPE_CNT + 1];
#endif /* CONFIG_CGROUP_SCHED */
+#include "ext.h"
+
#endif /* _KERNEL_SCHED_SCHED_H */