[2/7] rcu/nocb: Invert rcu_state.barrier_mutex VS hotplug lock locking order

Message ID 20220620224503.3841196-2-paulmck@kernel.org (mailing list archive)
State Accepted
Commit 24a57affd242566fef2935f04f062350b8275187
Series Callback-offload (nocb) updates for v5.20

Commit Message

Paul E. McKenney June 20, 2022, 10:44 p.m. UTC
From: Zqiang <qiang1.zhang@intel.com>

In case of failure to spawn either the rcuog or rcuo[p] kthreads for a
given rdp, rcu_nocb_rdp_deoffload() needs to be called with both the
hotplug lock and the barrier_mutex held. However, the cpus write lock is
already held when rcutree_prepare_cpu() is called, so it is not possible
to call rcu_nocb_rdp_deoffload() from there while taking only the
barrier_mutex, as this would result in a locking inversion against
rcu_nocb_cpu_deoffload(), which holds both locks in the reverse order.

Solve this simply by inverting the locking order inside
rcu_nocb_cpu_[de]offload() so that the hotplug lock is acquired first
and the barrier_mutex second. This will be a prerequisite for toggling
NOCB states through cpusets anyway.

Signed-off-by: Zqiang <qiang1.zhang@intel.com>
Cc: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Uladzislau Rezki <uladzislau.rezki@sony.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
---
 kernel/rcu/tree_nocb.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
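
To see why the old ordering could deadlock, consider a minimal
user-space sketch of the ABBA pattern. This is an illustration only,
not kernel code: pthread mutexes stand in for the hotplug lock and
rcu_state.barrier_mutex, and the two thread functions mimic the old
rcu_nocb_cpu_deoffload() ordering and the kthread-spawn failure path
reached with the hotplug lock already held under rcutree_prepare_cpu().

/*
 * Illustrative stand-ins only (build with: cc abba.c -lpthread).
 * Thread A takes barrier_mutex then hotplug_lock, modeling the old
 * rcu_nocb_cpu_deoffload() order.  Thread B takes hotplug_lock then
 * barrier_mutex, modeling the deoffload needed on the kthread-spawn
 * failure path.  If each thread wins its first lock, neither can take
 * its second: a classic ABBA deadlock.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t barrier_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *old_deoffload_order(void *arg)
{
	pthread_mutex_lock(&barrier_mutex);
	pthread_mutex_lock(&hotplug_lock);	/* may wait on thread B forever */
	pthread_mutex_unlock(&hotplug_lock);
	pthread_mutex_unlock(&barrier_mutex);
	return arg;
}

static void *prepare_cpu_failure_path(void *arg)
{
	pthread_mutex_lock(&hotplug_lock);	/* already held on this path */
	pthread_mutex_lock(&barrier_mutex);	/* may wait on thread A forever */
	pthread_mutex_unlock(&barrier_mutex);
	pthread_mutex_unlock(&hotplug_lock);
	return arg;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, old_deoffload_order, NULL);
	pthread_create(&b, NULL, prepare_cpu_failure_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* Whether this line is reached depends on thread interleaving. */
	puts("no deadlock on this run");
	return 0;
}

With the patch applied, rcu_nocb_cpu_[de]offload() also takes the
hotplug lock before the barrier_mutex, so every path acquires the two
locks in the same order and the circular wait can no longer form.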

Comments

Neeraj Upadhyay July 19, 2022, 9:28 a.m. UTC | #1
On 6/21/2022 4:14 AM, Paul E. McKenney wrote:
> From: Zqiang <qiang1.zhang@intel.com>
> 
> In case of failure to spawn either the rcuog or rcuo[p] kthreads for a
> given rdp, rcu_nocb_rdp_deoffload() needs to be called with both the
> hotplug lock and the barrier_mutex held. However, the cpus write lock is
> already held when rcutree_prepare_cpu() is called, so it is not possible
> to call rcu_nocb_rdp_deoffload() from there while taking only the
> barrier_mutex, as this would result in a locking inversion against
> rcu_nocb_cpu_deoffload(), which holds both locks in the reverse order.
> 
> Solve this simply by inverting the locking order inside
> rcu_nocb_cpu_[de]offload() so that the hotplug lock is acquired first
> and the barrier_mutex second. This will be a prerequisite for toggling
> NOCB states through cpusets anyway.
> 
> Signed-off-by: Zqiang <qiang1.zhang@intel.com>
> Cc: Neeraj Upadhyay <quic_neeraju@quicinc.com>
> Cc: Boqun Feng <boqun.feng@gmail.com>
> Cc: Uladzislau Rezki <uladzislau.rezki@sony.com>
> Cc: Joel Fernandes <joel@joelfernandes.org>
> Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
> Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
> ---

Reviewed-by: Neeraj Upadhyay <quic_neeraju@quicinc.com>


Thanks
Neeraj

Patch

diff --git a/kernel/rcu/tree_nocb.h b/kernel/rcu/tree_nocb.h
index dac74952e1d1b..f2f2cab6285a1 100644
--- a/kernel/rcu/tree_nocb.h
+++ b/kernel/rcu/tree_nocb.h
@@ -1055,8 +1055,8 @@ int rcu_nocb_cpu_deoffload(int cpu)
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 	int ret = 0;
 
-	mutex_lock(&rcu_state.barrier_mutex);
 	cpus_read_lock();
+	mutex_lock(&rcu_state.barrier_mutex);
 	if (rcu_rdp_is_offloaded(rdp)) {
 		if (cpu_online(cpu)) {
 			ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
@@ -1067,8 +1067,8 @@ int rcu_nocb_cpu_deoffload(int cpu)
 			ret = -EINVAL;
 		}
 	}
-	cpus_read_unlock();
 	mutex_unlock(&rcu_state.barrier_mutex);
+	cpus_read_unlock();
 
 	return ret;
 }
@@ -1134,8 +1134,8 @@ int rcu_nocb_cpu_offload(int cpu)
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 	int ret = 0;
 
-	mutex_lock(&rcu_state.barrier_mutex);
 	cpus_read_lock();
+	mutex_lock(&rcu_state.barrier_mutex);
 	if (!rcu_rdp_is_offloaded(rdp)) {
 		if (cpu_online(cpu)) {
 			ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
@@ -1146,8 +1146,8 @@ int rcu_nocb_cpu_offload(int cpu)
 			ret = -EINVAL;
 		}
 	}
-	cpus_read_unlock();
 	mutex_unlock(&rcu_state.barrier_mutex);
+	cpus_read_unlock();
 
 	return ret;
 }