[v2] rcu-tasks: Directly invoke rcuwait_wake_up() in call_rcu_tasks_generic()

Message ID 20230223085739.2594570-1-qiang1.zhang@intel.com (mailing list archive)
State New, archived
Series [v2] rcu-tasks: Directly invoke rcuwait_wake_up() in call_rcu_tasks_generic()

Commit Message

Zqiang Feb. 23, 2023, 8:57 a.m. UTC
According to commit 3063b33a347c ("Avoid raw-spinlocked wakeups from
call_rcu_tasks_generic()"), the wakeup of the grace-period kthread was
deferred through irq_work_queue() because a caller of
call_rcu_tasks_generic() might hold a raw spinlock, and the wake_up()
path acquires a non-raw spinlock, which produces lockdep splats when
the kernel is built with CONFIG_PROVE_RAW_LOCK_NESTING=y. However, the
grace-period kthread is now awakened via rcuwait_wake_up() rather than
wake_up(), and rcuwait_wake_up() acquires a raw spinlock instead of a
spinlock. Therefore, this commit removes the irq_work_queue() deferral
and invokes rcuwait_wake_up() directly from call_rcu_tasks_generic().

Signed-off-by: Zqiang <qiang1.zhang@intel.com>
---
 kernel/rcu/tasks.h | 16 +---------------
 1 file changed, 1 insertion(+), 15 deletions(-)
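
To illustrate the locking constraint described in the commit message,
here is a minimal, hypothetical sketch (not part of this patch) of a
caller that enqueues a Tasks-RCU callback while holding a raw spinlock;
the example_* names are invented for illustration only. With
CONFIG_PROVE_RAW_LOCK_NESTING=y, a wake_up() from such a context would
acquire a non-raw spinlock and trigger a lockdep splat, whereas
rcuwait_wake_up() only takes a raw spinlock and is therefore safe to
call directly:

#include <linux/spinlock.h>
#include <linux/rcupdate.h>

static DEFINE_RAW_SPINLOCK(example_lock);
static struct rcu_head example_rh;

static void example_cb(struct rcu_head *rhp)
{
	/* Callback body elided. */
}

static void example_enqueue(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);
	/*
	 * Raw spinlock held: only raw spinlocks may be acquired below,
	 * so the wakeup issued on behalf of this callback must not go
	 * through a non-raw spinlock such as the one in wake_up().
	 */
	call_rcu_tasks(&example_rh, example_cb);
	raw_spin_unlock_irqrestore(&example_lock, flags);
}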

Patch

diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index baf7ec178155..757b8c6da1ad 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -39,7 +39,6 @@  struct rcu_tasks_percpu {
 	unsigned long rtp_jiffies;
 	unsigned long rtp_n_lock_retries;
 	struct work_struct rtp_work;
-	struct irq_work rtp_irq_work;
 	struct rcu_head barrier_q_head;
 	struct list_head rtp_blkd_tasks;
 	int cpu;
@@ -112,12 +111,9 @@  struct rcu_tasks {
 	char *kname;
 };
 
-static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);
-
 #define DEFINE_RCU_TASKS(rt_name, gp, call, n)						\
 static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = {			\
 	.lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock),		\
-	.rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup),			\
 };											\
 static struct rcu_tasks rt_name =							\
 {											\
@@ -273,16 +269,6 @@  static void cblist_init_generic(struct rcu_tasks *rtp)
 	pr_info("%s: Setting shift to %d and lim to %d.\n", __func__, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim));
 }
 
-// IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
-static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
-{
-	struct rcu_tasks *rtp;
-	struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);
-
-	rtp = rtpcp->rtpp;
-	rcuwait_wake_up(&rtp->cbs_wait);
-}
-
 // Enqueue a callback for the specified flavor of Tasks RCU.
 static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
 				   struct rcu_tasks *rtp)
@@ -334,7 +320,7 @@  static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
 	rcu_read_unlock();
 	/* We can't create the thread unless interrupts are enabled. */
 	if (needwake && READ_ONCE(rtp->kthread_ptr))
-		irq_work_queue(&rtpcp->rtp_irq_work);
+		rcuwait_wake_up(&rtp->cbs_wait);
 }
 
 // RCU callback function for rcu_barrier_tasks_generic().