With stop_machine() gone from the CPU offline path, we can't depend on
preempt_disable() to prevent CPUs from going offline from under us. Use
the get/put_online_cpus_stable_atomic() APIs instead, which can safely be
invoked from atomic context, to prevent CPUs from going offline.

Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
---

 kernel/smp.c |   30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)

--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -688,12 +688,12 @@ int on_each_cpu(void (*func) (void *info), void *info, int wait)
unsigned long flags;
int ret = 0;

- preempt_disable();
+ get_online_cpus_stable_atomic();
ret = smp_call_function(func, info, wait);
local_irq_save(flags);
func(info);
local_irq_restore(flags);
- preempt_enable();
+ put_online_cpus_stable_atomic();
return ret;
}
EXPORT_SYMBOL(on_each_cpu);
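The get/put_online_cpus_stable_atomic() pair is introduced earlier in this
series, and the exact implementation lives in that patch. As a rough sketch
of the read-side semantics being relied upon here (the lock name below is
made up purely for illustration, and the real code may well differ), it
behaves along these lines:

static DEFINE_RWLOCK(cpu_hotplug_stable_lock);	/* illustrative name */

void get_online_cpus_stable_atomic(void)
{
	/* Stay atomic, like the preempt_disable() this replaces. */
	preempt_disable();

	/*
	 * The CPU offline path is assumed to take this lock for write
	 * (after updating cpu_online_stable_mask), so no CPU in the
	 * stable mask can go away while we hold the read lock.
	 */
	read_lock(&cpu_hotplug_stable_lock);
}

void put_online_cpus_stable_atomic(void)
{
	read_unlock(&cpu_hotplug_stable_lock);
	preempt_enable();
}

What matters for this file is just that the section remains atomic
(preemption stays disabled throughout) and that the set of online CPUs
observed inside it cannot shrink.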
@@ -715,7 +715,11 @@ EXPORT_SYMBOL(on_each_cpu);
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
void *info, bool wait)
{
- int cpu = get_cpu();
+ int cpu;
+
+ get_online_cpus_stable_atomic();
+
+ cpu = smp_processor_id();

smp_call_function_many(mask, func, info, wait);
if (cpumask_test_cpu(cpu, mask)) {
@@ -723,7 +727,7 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
func(info);
local_irq_enable();
}
- put_cpu();
+ put_online_cpus_stable_atomic();
}
EXPORT_SYMBOL(on_each_cpu_mask);
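For reference, get_cpu()/put_cpu() are defined in include/linux/smp.h as:

#define get_cpu()		({ preempt_disable(); smp_processor_id(); })
#define put_cpu()		preempt_enable()

So dropping them here is safe only on the assumption that
get_online_cpus_stable_atomic() itself disables preemption, as sketched
above; otherwise the bare smp_processor_id() would run in preemptible
context (tripping the CONFIG_DEBUG_PREEMPT check) and the returned CPU id
could go stale before smp_call_function_many() runs.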
@@ -748,8 +752,10 @@ EXPORT_SYMBOL(on_each_cpu_mask);
* The function might sleep if the GFP flags indicates a non
* atomic allocation is allowed.
*
- * Preemption is disabled to protect against CPUs going offline but not online.
- * CPUs going online during the call will not be seen or sent an IPI.
+ * We use get/put_online_cpus_stable_atomic() to get a stable online mask
+ * to work with, whose CPUs won't go offline in the middle of our operation.
+ * CPUs that have already begun going offline are skipped, and CPUs coming
+ * online during the call will not be seen or sent an IPI.
*
* You must not call this function with disabled interrupts or
* from a hardware interrupt handler or from a bottom half handler.
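To make the above constraints concrete, a caller of on_each_cpu_cond()
looks like the following sketch (every name except the interface itself is
invented for illustration):

#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(bool, vcache_dirty);

/* Invoked with preemption disabled; says whether @cpu needs the IPI. */
static bool vcache_is_dirty(int cpu, void *info)
{
	return per_cpu(vcache_dirty, cpu);
}

/* Runs on each selected CPU, with interrupts disabled. */
static void vcache_flush(void *info)
{
	this_cpu_write(vcache_dirty, false);
}

void flush_dirty_vcaches(void)
{
	/* May sleep: GFP_KERNEL allows a blocking cpumask allocation. */
	on_each_cpu_cond(vcache_is_dirty, vcache_flush, NULL, true,
			 GFP_KERNEL);
}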
@@ -764,26 +770,26 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
might_sleep_if(gfp_flags & __GFP_WAIT);

if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
- preempt_disable();
- for_each_online_cpu(cpu)
+ get_online_cpus_stable_atomic();
+ for_each_online_cpu_stable(cpu)
if (cond_func(cpu, info))
cpumask_set_cpu(cpu, cpus);
on_each_cpu_mask(cpus, func, info, wait);
- preempt_enable();
+ put_online_cpus_stable_atomic();
free_cpumask_var(cpus);
} else {
/*
* No free cpumask, bother. No matter, we'll
* just have to IPI them one by one.
*/
- preempt_disable();
- for_each_online_cpu(cpu)
+ get_online_cpus_stable_atomic();
+ for_each_online_cpu_stable(cpu)
if (cond_func(cpu, info)) {
ret = smp_call_function_single(cpu, func,
info, wait);
WARN_ON_ONCE(!ret);
}
- preempt_enable();
+ put_online_cpus_stable_atomic();
}
}
EXPORT_SYMBOL(on_each_cpu_cond);
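for_each_online_cpu_stable() is presumably the stable-mask counterpart of
for_each_online_cpu() from include/linux/cpumask.h, i.e. something like
(assumed, per the earlier patches in this series):

#define for_each_online_cpu_stable(cpu) \
	for_each_cpu((cpu), cpu_online_stable_mask)

As an aside, the pre-existing WARN_ON_ONCE(!ret) in the fallback path looks
inverted: smp_call_function_single() returns 0 on success, so the warning
fires on every successful call rather than on failure. That is independent
of this patch, though.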