@@ -4312,6 +4312,7 @@ bool __sched yield_to(struct task_struct *p, bool preempt)
 	unsigned long flags;
 	bool yielded = 0;
 
+	get_online_cpus_atomic_light();
 	local_irq_save(flags);
 	rq = this_rq();
 
@@ -4339,13 +4340,14 @@ again:
 		 * Make p's CPU reschedule; pick_next_entity takes care of
 		 * fairness.
 		 */
-		if (preempt && rq != p_rq)
+		if (preempt && rq != p_rq && cpu_online(task_cpu(p)))
 			resched_task(p_rq->curr);
 	}
 
 out:
 	double_rq_unlock(rq, p_rq);
 	local_irq_restore(flags);
+	put_online_cpus_atomic_light();
 
 	if (yielded)
 		schedule();
Once stop_machine() is gone from the CPU offline path, we won't be able
to depend on local_irq_save() to prevent CPUs from going offline from
under us. Use the get/put_online_cpus_atomic_light() APIs to prevent
changes to the cpu_online_mask, while invoking from atomic context.

Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
---

 kernel/sched/core.c |    4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
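
To illustrate the read-side pattern this depends on, here is a minimal
sketch (assuming the get/put_online_cpus_atomic_light() APIs introduced
earlier in this series, which are not mainline; poke_task_cpu() is a
hypothetical caller written only for illustration):

#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

/* Hypothetical caller: kick a task's CPU, but only if it is online. */
static void poke_task_cpu(struct task_struct *p)
{
	int cpu;

	/* Pin cpu_online_mask; safe in atomic context (series API). */
	get_online_cpus_atomic_light();

	cpu = task_cpu(p);
	if (cpu_online(cpu))	/* result stays valid until the put below */
		smp_send_reschedule(cpu);

	put_online_cpus_atomic_light();
}

The same reasoning applies in yield_to() above: the reference is taken
before local_irq_save() and dropped after local_irq_restore(), so the
cpu_online(task_cpu(p)) check runs while the online mask is pinned and
the target CPU cannot go offline between the check and resched_task().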