--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -262,6 +262,7 @@ static void rcu_report_exp_rdp(struct rcu_data *rdp)
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	WRITE_ONCE(rdp->cpu_no_qs.b.exp, false);
+	ASSERT_EXCLUSIVE_WRITER(rdp->cpu_no_qs.b.exp);
 	rcu_report_exp_cpu_mult(rnp, flags, rdp->grpmask, true);
 }
@@ -721,6 +722,7 @@ static void rcu_exp_sel_wait_wake(unsigned long s)
 /* Request an expedited quiescent state. */
 static void rcu_exp_need_qs(void)
 {
+	ASSERT_EXCLUSIVE_WRITER_SCOPED(*this_cpu_ptr(&rcu_data.cpu_no_qs.b.exp));
 	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
 	/* Store .exp before .rcu_urgent_qs. */
 	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
@@ -753,6 +755,7 @@ static void rcu_exp_handler(void *unused)
 	 * sync_sched_exp_online_cleanup() implementation being a no-op,
 	 * so warn if this does happen.
 	 */
+	ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp);
 	if (WARN_ON_ONCE(!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
 			 READ_ONCE(rdp->cpu_no_qs.b.exp)))
 		return;
@@ -867,6 +870,7 @@ static void rcu_exp_handler(void *unused)
 	struct rcu_node *rnp = rdp->mynode;
 	bool preempt_bh_enabled = !(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));
 
+	ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp);
 	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
 	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
 		return;
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -275,6 +275,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
 		rcu_report_exp_rdp(rdp);
 	else
 		WARN_ON_ONCE(rdp->cpu_no_qs.b.exp);
+	ASSERT_EXCLUSIVE_WRITER_SCOPED(rdp->cpu_no_qs.b.exp);
 }
 
 /*
The value of rdp->cpu_no_qs.b.exp may be changed only by the corresponding CPU, and that CPU is not even allowed to race with itself, for example, via interrupt handlers. This commit therefore adds KCSAN exclusive-writer assertions to check this constraint.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Frederic Weisbecker <frederic@kernel.org>
---
 kernel/rcu/tree_exp.h    | 4 ++++
 kernel/rcu/tree_plugin.h | 1 +
 2 files changed, 5 insertions(+)
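
For readers unfamiliar with KCSAN's exclusive-writer assertions, the sketch below illustrates what they check. It is illustrative only and not part of the patch: ASSERT_EXCLUSIVE_WRITER() tells KCSAN that no other thread of execution may write the marked variable concurrently with that access, and ASSERT_EXCLUSIVE_WRITER_SCOPED() extends the claim to the end of the enclosing scope. The per-CPU variable demo_flag and the demo_*() functions are hypothetical names invented for this example.

/* Illustrative sketch only, not from the kernel tree. */
#include <linux/kcsan-checks.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, demo_flag);

/* Runs on the owning CPU, the only legitimate writer. */
static void demo_owner_update(void)
{
	/* Claim exclusive write access until the end of this function. */
	ASSERT_EXCLUSIVE_WRITER_SCOPED(*this_cpu_ptr(&demo_flag));
	__this_cpu_write(demo_flag, 1);
}

/*
 * If this write from another CPU (or from an interrupt handler on the
 * owning CPU) overlaps demo_owner_update(), a CONFIG_KCSAN=y kernel
 * emits a data-race report naming the assertion above.
 */
static void demo_remote_write(int cpu)
{
	WRITE_ONCE(per_cpu(demo_flag, cpu), 0);
}

The assertions added by this patch work the same way: they would cause KCSAN to flag any write to rdp->cpu_no_qs.b.exp that overlaps the marked accesses on the owning CPU, including the interrupt-handler self-race that the commit message rules out.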