@@ -809,10 +809,16 @@ static void rcu_tasks_pregp_step(struct list_head *hop)
synchronize_rcu();
}
+static bool task_is_on_dyntick_idle(struct task_struct *t)
+{
+ return IS_ENABLED(CONFIG_NO_HZ_FULL) && t->rcu_tasks_idle_cpu >= 0;
+}
+
/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
- if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
+ if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t) &&
+ !task_is_on_dyntick_idle(t)) {
get_task_struct(t);
t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
WRITE_ONCE(t->rcu_tasks_holdout, true);
@@ -842,8 +848,7 @@ static void check_holdout_task(struct task_struct *t,
if (!READ_ONCE(t->rcu_tasks_holdout) ||
t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
!READ_ONCE(t->on_rq) ||
- (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
- !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
+ task_is_on_dyntick_idle(t)) {
WRITE_ONCE(t->rcu_tasks_holdout, false);
list_del_init(&t->rcu_tasks_holdout_list);
put_task_struct(t);
When RCU Tasks scans on-rq tasks, there is no need to scan non-idle tasks that have recorded a dyntick-idle entry (that is, tasks that have invoked rcu_user_enter()), because such tasks cannot be within an RCU Tasks read-side critical section. This commit therefore adds a task_is_on_dyntick_idle() helper, uses it to skip such tasks in rcu_tasks_pertask(), and replaces the open-coded check in check_holdout_task() with it.

Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Zqiang <qiang1.zhang@intel.com>
---
 kernel/rcu/tasks.h | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
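To illustrate the effect of the new check, here is a minimal user-space sketch (assuming a CONFIG_NO_HZ_FULL=y build). The fake_task struct, would_be_holdout() helper, and the harness are hypothetical stand-ins for task_struct and rcu_tasks_pertask(); this is not kernel code, only a demonstration of when a task would now be skipped.

```c
/*
 * User-space sketch of the predicate added by this patch.
 * fake_task and would_be_holdout() are hypothetical stand-ins for the
 * kernel's task_struct and rcu_tasks_pertask() holdout condition.
 */
#include <stdbool.h>
#include <stdio.h>

#define CONFIG_NO_HZ_FULL 1   /* assume a NO_HZ_FULL=y build */

struct fake_task {
	const char *comm;
	int on_rq;                /* non-zero: task is on a runqueue */
	int rcu_tasks_idle_cpu;   /* >= 0: task recorded a dyntick-idle entry */
};

static bool task_is_on_dyntick_idle(struct fake_task *t)
{
	return CONFIG_NO_HZ_FULL && t->rcu_tasks_idle_cpu >= 0;
}

/*
 * Simplified version of the new rcu_tasks_pertask() condition: only
 * on-rq tasks that have NOT recorded a dyntick-idle entry become holdouts.
 */
static bool would_be_holdout(struct fake_task *t)
{
	return t->on_rq && !task_is_on_dyntick_idle(t);
}

int main(void)
{
	struct fake_task in_kernel = { "in-kernel", 1, -1 };
	struct fake_task nohz_user = { "nohz-user", 1,  3 };

	printf("%s -> holdout: %d\n", in_kernel.comm, would_be_holdout(&in_kernel));
	printf("%s -> holdout: %d\n", nohz_user.comm, would_be_holdout(&nohz_user));
	return 0;
}
```

The "nohz-user" task, having recorded a dyntick-idle entry via rcu_user_enter(), is never placed on the holdout list in this sketch, which is the behavior the patch intends for rcu_tasks_pertask().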