@@ -1691,6 +1691,7 @@ extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *n
*/
extern struct task_struct *find_get_task_by_vpid(pid_t nr);
+extern bool cpu_rq_is_locked(int cpu);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
@@ -73,6 +73,14 @@ __read_mostly int scheduler_running;
*/
int sysctl_sched_rt_runtime = 950000;
+/* Report whether @cpu's runqueue lock is currently held. */
+bool cpu_rq_is_locked(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	return raw_spin_is_locked(&rq->lock);
+}
+
/*
* __task_rq_lock - lock the rq @p resides on.
*/
Add the API cpu_rq_is_locked() to check whether the given CPU's runqueue lock is currently held. It is used in a subsequent patch to determine whether a task wakeup should be executed immediately or be delayed. Signed-off-by: Gavin Shan <gshan@redhat.com> --- include/linux/sched.h | 1 + kernel/sched/core.c | 8 ++++++++ 2 files changed, 9 insertions(+)