Message ID | 20220620224503.3841196-5-paulmck@kernel.org (mailing list archive) |
---|---|
State | Accepted |
Commit | 5103850654fdc651f0a7076ac753b958f018bb85 |
Headers | show |
Series | Callback-offload (nocb) updates for v5.20 | expand |
On 6/21/2022 4:15 AM, Paul E. McKenney wrote: > From: Zqiang <qiang1.zhang@intel.com> > > Callbacks are invoked in RCU kthreads when callbacks are offloaded > (rcu_nocbs boot parameter) or when RCU's softirq handler has been > offloaded to rcuc kthreads (use_softirq==0). The current code allows > for the rcu_nocbs case but not the use_softirq case. This commit adds > support for the use_softirq case. > > Reported-by: kernel test robot <lkp@intel.com> > Signed-off-by: Zqiang <qiang1.zhang@intel.com> > Signed-off-by: Paul E. McKenney <paulmck@kernel.org> > --- Reviewed-by: Neeraj Upadhyay <quic_neeraju@quicinc.com> Thanks Neeraj > kernel/rcu/tree.c | 4 ++-- > kernel/rcu/tree.h | 2 +- > kernel/rcu/tree_plugin.h | 33 +++++++++++++++++++-------------- > 3 files changed, 22 insertions(+), 17 deletions(-) > > diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c > index c25ba442044a6..74455671e6cf2 100644 > --- a/kernel/rcu/tree.c > +++ b/kernel/rcu/tree.c > @@ -2530,7 +2530,7 @@ static void rcu_do_batch(struct rcu_data *rdp) > trace_rcu_batch_end(rcu_state.name, 0, > !rcu_segcblist_empty(&rdp->cblist), > need_resched(), is_idle_task(current), > - rcu_is_callbacks_kthread()); > + rcu_is_callbacks_kthread(rdp)); > return; > } > > @@ -2608,7 +2608,7 @@ static void rcu_do_batch(struct rcu_data *rdp) > rcu_nocb_lock_irqsave(rdp, flags); > rdp->n_cbs_invoked += count; > trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(), > - is_idle_task(current), rcu_is_callbacks_kthread()); > + is_idle_task(current), rcu_is_callbacks_kthread(rdp)); > > /* Update counts and requeue any remaining callbacks. 
*/ > rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); > diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h > index 4f8532c33558f..649ad4f0129b1 100644 > --- a/kernel/rcu/tree.h > +++ b/kernel/rcu/tree.h > @@ -426,7 +426,7 @@ static void rcu_flavor_sched_clock_irq(int user); > static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck); > static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); > static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); > -static bool rcu_is_callbacks_kthread(void); > +static bool rcu_is_callbacks_kthread(struct rcu_data *rdp); > static void rcu_cpu_kthread_setup(unsigned int cpu); > static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp); > static bool rcu_preempt_has_tasks(struct rcu_node *rnp); > diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h > index c8ba0fe17267c..0483e1338c413 100644 > --- a/kernel/rcu/tree_plugin.h > +++ b/kernel/rcu/tree_plugin.h > @@ -1012,6 +1012,25 @@ static void rcu_cpu_kthread_setup(unsigned int cpu) > WRITE_ONCE(rdp->rcuc_activity, jiffies); > } > > +static bool rcu_is_callbacks_nocb_kthread(struct rcu_data *rdp) > +{ > +#ifdef CONFIG_RCU_NOCB_CPU > + return rdp->nocb_cb_kthread == current; > +#else > + return false; > +#endif > +} > + > +/* > + * Is the current CPU running the RCU-callbacks kthread? > + * Caller must have preemption disabled. > + */ > +static bool rcu_is_callbacks_kthread(struct rcu_data *rdp) > +{ > + return rdp->rcu_cpu_kthread_task == current || > + rcu_is_callbacks_nocb_kthread(rdp); > +} > + > #ifdef CONFIG_RCU_BOOST > > /* > @@ -1151,15 +1170,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) > } > } > > -/* > - * Is the current CPU running the RCU-callbacks kthread? > - * Caller must have preemption disabled. 
> - */ > -static bool rcu_is_callbacks_kthread(void) > -{ > - return __this_cpu_read(rcu_data.rcu_cpu_kthread_task) == current; > -} > - > #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000) > > /* > @@ -1242,11 +1252,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) > raw_spin_unlock_irqrestore_rcu_node(rnp, flags); > } > > -static bool rcu_is_callbacks_kthread(void) > -{ > - return false; > -} > - > static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) > { > }
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index c25ba442044a6..74455671e6cf2 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -2530,7 +2530,7 @@ static void rcu_do_batch(struct rcu_data *rdp) trace_rcu_batch_end(rcu_state.name, 0, !rcu_segcblist_empty(&rdp->cblist), need_resched(), is_idle_task(current), - rcu_is_callbacks_kthread()); + rcu_is_callbacks_kthread(rdp)); return; } @@ -2608,7 +2608,7 @@ static void rcu_do_batch(struct rcu_data *rdp) rcu_nocb_lock_irqsave(rdp, flags); rdp->n_cbs_invoked += count; trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(), - is_idle_task(current), rcu_is_callbacks_kthread()); + is_idle_task(current), rcu_is_callbacks_kthread(rdp)); /* Update counts and requeue any remaining callbacks. */ rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl); diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 4f8532c33558f..649ad4f0129b1 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -426,7 +426,7 @@ static void rcu_flavor_sched_clock_irq(int user); static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck); static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); -static bool rcu_is_callbacks_kthread(void); +static bool rcu_is_callbacks_kthread(struct rcu_data *rdp); static void rcu_cpu_kthread_setup(unsigned int cpu); static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp); static bool rcu_preempt_has_tasks(struct rcu_node *rnp); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index c8ba0fe17267c..0483e1338c413 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -1012,6 +1012,25 @@ static void rcu_cpu_kthread_setup(unsigned int cpu) WRITE_ONCE(rdp->rcuc_activity, jiffies); } +static bool rcu_is_callbacks_nocb_kthread(struct rcu_data *rdp) +{ +#ifdef CONFIG_RCU_NOCB_CPU + return rdp->nocb_cb_kthread == current; +#else + return false; +#endif +} + +/* + * Is the current CPU 
running the RCU-callbacks kthread? + * Caller must have preemption disabled. + */ +static bool rcu_is_callbacks_kthread(struct rcu_data *rdp) +{ + return rdp->rcu_cpu_kthread_task == current || + rcu_is_callbacks_nocb_kthread(rdp); +} + #ifdef CONFIG_RCU_BOOST /* @@ -1151,15 +1170,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) } } -/* - * Is the current CPU running the RCU-callbacks kthread? - * Caller must have preemption disabled. - */ -static bool rcu_is_callbacks_kthread(void) -{ - return __this_cpu_read(rcu_data.rcu_cpu_kthread_task) == current; -} - #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000) /* @@ -1242,11 +1252,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } -static bool rcu_is_callbacks_kthread(void) -{ - return false; -} - static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) { }