
[3/4] rcu/kvfree: Move need_offload_krc() out of krcp->lock

Message ID 20221128153628.541361-3-urezki@gmail.com (mailing list archive)
State New, archived
Series [1/4] rcu/kvfree: Switch to a generic linked list API

Commit Message

Uladzislau Rezki Nov. 28, 2022, 3:36 p.m. UTC
Currently the need_offload_krc() function requires krcp->lock
to be held because krcp->head cannot be checked concurrently.

Fix this by updating krcp->head with the WRITE_ONCE() macro so
that the check becomes lock-free and readers are guaranteed to
see valid data without taking the lock.

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
---
 kernel/rcu/tree.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)
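
As an aside, the pattern can be illustrated with a small userspace sketch.
The names box, box_push() and box_has_work() are made up for illustration,
and the READ_ONCE()/WRITE_ONCE() macros below are simplified stand-ins for
the kernel helpers: the writer publishes the new head with WRITE_ONCE()
while holding the lock, so a reader may poll the head with READ_ONCE()
without taking the lock at all.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() helpers. */
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))

struct node {
	struct node *next;
};

/* Hypothetical container: ->head is written under ->lock, read locklessly. */
struct box {
	pthread_mutex_t lock;
	struct node *head;
};

/* Writer side: queue a node; only the head update needs the annotation. */
static void box_push(struct box *b, struct node *n)
{
	pthread_mutex_lock(&b->lock);
	n->next = b->head;
	WRITE_ONCE(b->head, n);		/* pairs with READ_ONCE() below */
	pthread_mutex_unlock(&b->lock);
}

/*
 * Reader side: a racy but safe "is anything queued?" check. It may miss
 * a concurrent update, but it never observes a torn pointer value.
 */
static int box_has_work(struct box *b)
{
	return READ_ONCE(b->head) != NULL;
}

int main(void)
{
	struct box b = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = NULL };
	struct node n = { .next = NULL };

	printf("has work before push: %d\n", box_has_work(&b));
	box_push(&b, &n);
	printf("has work after push:  %d\n", box_has_work(&b));
	return 0;
}

The patch below follows the same idea: the list is still modified under
krcp->lock, but need_offload_krc() can sample krcp->head outside of it.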

Patch

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 3b5f6036d884..f68ddbef2a33 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3218,7 +3218,7 @@  static void kfree_rcu_monitor(struct work_struct *work)
 			// objects queued on the linked list.
 			if (!krwp->head_free) {
 				krwp->head_free = krcp->head;
-				krcp->head = NULL;
+				WRITE_ONCE(krcp->head, NULL);
 			}
 
 			WRITE_ONCE(krcp->count, 0);
@@ -3232,6 +3232,8 @@  static void kfree_rcu_monitor(struct work_struct *work)
 		}
 	}
 
+	raw_spin_unlock_irqrestore(&krcp->lock, flags);
+
 	// If there is nothing to detach, it means that our job is
 	// successfully done here. In case of having at least one
 	// of the channels that is still busy we should rearm the
@@ -3239,8 +3241,6 @@  static void kfree_rcu_monitor(struct work_struct *work)
 	// still in progress.
 	if (need_offload_krc(krcp))
 		schedule_delayed_monitor_work(krcp);
-
-	raw_spin_unlock_irqrestore(&krcp->lock, flags);
 }
 
 static enum hrtimer_restart
@@ -3415,7 +3415,7 @@  void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 
 		head->func = func;
 		head->next = krcp->head;
-		krcp->head = head;
+		WRITE_ONCE(krcp->head, head);
 		success = true;
 	}
 
@@ -3492,15 +3492,12 @@  static struct shrinker kfree_rcu_shrinker = {
 void __init kfree_rcu_scheduler_running(void)
 {
 	int cpu;
-	unsigned long flags;
 
 	for_each_possible_cpu(cpu) {
 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
-		raw_spin_lock_irqsave(&krcp->lock, flags);
 		if (need_offload_krc(krcp))
 			schedule_delayed_monitor_work(krcp);
-		raw_spin_unlock_irqrestore(&krcp->lock, flags);
 	}
 }