@@ -4815,6 +4815,22 @@ rcu_boot_init_percpu_data(int cpu)
rcu_boot_init_nocb_percpu_data(rdp);
}

+static void rcu_thread_affine_rnp(struct task_struct *t, struct rcu_node *rnp)
+{
+ cpumask_var_t affinity;
+ int cpu;
+
+ if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
+ return;
+
+ for_each_leaf_node_possible_cpu(rnp, cpu)
+ cpumask_set_cpu(cpu, affinity);
+
+ kthread_affine_preferred(t, affinity);
+
+ free_cpumask_var(affinity);
+}
+
struct kthread_worker *rcu_exp_gp_kworker;

static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
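The helper added above gathers every possible CPU of the leaf rcu_node into a temporary mask and registers it via kthread_affine_preferred(), so the kthread core, not RCU, now keeps the task on those CPUs and falls back to housekeeping CPUs while none of them are online. The preferred mask must be registered before the kthread first runs, which is what dictates the create/affine/wake ordering in the hunks below. A minimal sketch of that pattern, with hypothetical names (my_create_affined_worker, "my_worker"):

	#include <linux/cpumask.h>
	#include <linux/err.h>
	#include <linux/kthread.h>

	static struct kthread_worker *my_create_affined_worker(const struct cpumask *preferred)
	{
		struct kthread_worker *w;

		/* Create the worker kthread without waking it yet. */
		w = kthread_create_worker(0, "my_worker");
		if (IS_ERR(w))
			return w;

		/*
		 * Must precede the first wakeup: the kthread core records
		 * the mask and reapplies it as those CPUs go on/offline.
		 */
		kthread_affine_preferred(w->task, preferred);

		/* Only now let the kthread run, already affined. */
		wake_up_process(w->task);

		return w;
	}
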
@@ -4827,7 +4843,7 @@ static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)
if (rnp->exp_kworker)
return;

- kworker = kthread_run_worker(0, name, rnp_index);
+ kworker = kthread_create_worker(0, name, rnp_index);
if (IS_ERR_OR_NULL(kworker)) {
pr_err("Failed to create par gp kworker on %d/%d\n",
rnp->grplo, rnp->grphi);
@@ -4837,16 +4853,9 @@ static void rcu_spawn_exp_par_gp_kworker(struct rcu_node *rnp)

if (IS_ENABLED(CONFIG_RCU_EXP_KTHREAD))
sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
-}
-
-static struct task_struct *rcu_exp_par_gp_task(struct rcu_node *rnp)
-{
- struct kthread_worker *kworker = READ_ONCE(rnp->exp_kworker);
-
- if (!kworker)
- return NULL;
-
- return kworker->task;
+
+ rcu_thread_affine_rnp(kworker->task, rnp);
+ wake_up_process(kworker->task);
}

static void __init rcu_start_exp_gp_kworker(void)
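With the affinity handled at creation time, rcu_spawn_exp_par_gp_kworker() now creates the kworker dormant (kthread_create_worker() instead of kthread_run_worker()), applies the scheduling policy and the preferred affinity, and only then wakes the task. rcu_exp_par_gp_task(), whose only remaining purpose was to hand the task to the hotplug-time affinity code removed below, is dropped. Queueing work to the kworker is unaffected; for completeness, a hedged sketch with hypothetical my_work/my_work_fn/my_queue names:

	#include <linux/kthread.h>

	static void my_work_fn(struct kthread_work *work)
	{
		/* Executes in the worker kthread, preferably on the preferred CPUs. */
	}

	static DEFINE_KTHREAD_WORK(my_work, my_work_fn);

	static void my_queue(struct kthread_worker *w)
	{
		/* Returns false if my_work is already pending. */
		kthread_queue_work(w, &my_work);
	}
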
@@ -4931,79 +4940,6 @@ int rcutree_prepare_cpu(unsigned int cpu)
return 0;
}

-static void rcu_thread_affine_rnp(struct task_struct *t, struct rcu_node *rnp)
-{
- cpumask_var_t affinity;
- int cpu;
-
- if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
- return;
-
- for_each_leaf_node_possible_cpu(rnp, cpu)
- cpumask_set_cpu(cpu, affinity);
-
- kthread_affine_preferred(t, affinity);
-
- free_cpumask_var(affinity);
-}
-
-/*
- * Update kthreads affinity during CPU-hotplug changes.
- *
- * Set the per-rcu_node kthread's affinity to cover all CPUs that are
- * served by the rcu_node in question. The CPU hotplug lock is still
- * held, so the value of rnp->qsmaskinit will be stable.
- *
- * We don't include outgoingcpu in the affinity set, use -1 if there is
- * no outgoing CPU. If there are no CPUs left in the affinity set,
- * this function allows the kthread to execute on any CPU.
- *
- * Any future concurrent calls are serialized via ->kthread_mutex.
- */
-static void rcutree_affinity_setting(unsigned int cpu, int outgoingcpu)
-{
- cpumask_var_t cm;
- unsigned long mask;
- struct rcu_data *rdp;
- struct rcu_node *rnp;
- struct task_struct *task_exp;
-
- rdp = per_cpu_ptr(&rcu_data, cpu);
- rnp = rdp->mynode;
-
- task_exp = rcu_exp_par_gp_task(rnp);
-
- /*
- * If CPU is the boot one, this task is created later from early
- * initcall since kthreadd must be created first.
- */
- if (!task_exp)
- return;
-
- if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
- return;
-
- mutex_lock(&rnp->kthread_mutex);
- mask = rcu_rnp_online_cpus(rnp);
- for_each_leaf_node_possible_cpu(rnp, cpu)
- if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
- cpu != outgoingcpu)
- cpumask_set_cpu(cpu, cm);
- cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU));
- if (cpumask_empty(cm)) {
- cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU));
- if (outgoingcpu >= 0)
- cpumask_clear_cpu(outgoingcpu, cm);
- }
-
- if (task_exp)
- set_cpus_allowed_ptr(task_exp, cm);
-
- mutex_unlock(&rnp->kthread_mutex);
-
- free_cpumask_var(cm);
-}
-
/*
* Has the specified (known valid) CPU ever been fully online?
*/
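The whole hotplug-driven rcutree_affinity_setting() path goes away: once kthread_affine_preferred() is registered at creation time, the kthread core re-evaluates the task's CPUs on hotplug, including a housekeeping fallback comparable to what the removed code implemented by hand, which is also why the calls in rcutree_online_cpu() and rcutree_offline_cpu() below are deleted. For comparison, the manual pattern the removed helper implemented, condensed into a hedged sketch (manual_affinity_update() and its parameters are hypothetical):

	#include <linux/cpumask.h>
	#include <linux/gfp.h>
	#include <linux/sched.h>
	#include <linux/sched/isolation.h>

	static void manual_affinity_update(struct task_struct *t,
					   const struct cpumask *node_cpus,
					   int outgoingcpu)
	{
		cpumask_var_t cm;

		if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
			return;

		/* Prefer the node's CPUs, restricted to housekeeping ones. */
		cpumask_and(cm, node_cpus, housekeeping_cpumask(HK_TYPE_RCU));
		if (outgoingcpu >= 0)
			cpumask_clear_cpu(outgoingcpu, cm);

		/* Fall back to any housekeeping CPU if the node emptied out. */
		if (cpumask_empty(cm)) {
			cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU));
			if (outgoingcpu >= 0)
				cpumask_clear_cpu(outgoingcpu, cm);
		}

		set_cpus_allowed_ptr(t, cm);
		free_cpumask_var(cm);
	}
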
@@ -5032,7 +4968,6 @@ int rcutree_online_cpu(unsigned int cpu)
if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
return 0; /* Too early in boot for scheduler work. */
sync_sched_exp_online_cleanup(cpu);
- rcutree_affinity_setting(cpu, -1);

// Stop-machine done, so allow nohz_full to disable tick.
tick_dep_clear(TICK_DEP_BIT_RCU);
@@ -5249,8 +5184,6 @@ int rcutree_offline_cpu(unsigned int cpu)
rnp->ffmask &= ~rdp->grpmask;
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

- rcutree_affinity_setting(cpu, cpu);
-
// nohz_full CPUs need the tick for stop-machine to work quickly
tick_dep_set(TICK_DEP_BIT_RCU);
return 0;