diff mbox series

[3/4] rcu/kvfree: Use polled API in a slow path

Message ID 20240828110929.3713-3-urezki@gmail.com (mailing list archive)
State New
Headers show
Series [1/4] rcu/kvfree: Support dynamic rcu_head for single argument objects | expand

Commit Message

Uladzislau Rezki Aug. 28, 2024, 11:09 a.m. UTC
For the single-argument case, use the polled API to check whether a
grace period (GP) has already elapsed. This makes it possible to
bypass an extra GP request on the slow path.

Allocating a page or a dynamic rcu_head might take some time and
still fail; in that scenario, a GP that was requested on entry to
kvfree_call_rcu() may already have elapsed. Take advantage of this.

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
---
 kernel/rcu/tree.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)
diff mbox series

Patch

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 893ee69d4a4b..030a453f36c6 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3845,6 +3845,7 @@  attach_rcu_head_to_object(void *obj)
 void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 {
 	unsigned long flags;
+	struct rcu_gp_oldstate old_snap;
 	struct kfree_rcu_cpu *krcp;
 	bool success;
 
@@ -3855,8 +3856,10 @@  void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 	 * only. For other places please embed an rcu_head to
 	 * your data.
 	 */
-	if (!head)
+	if (!head) {
 		might_sleep();
+		start_poll_synchronize_rcu_full(&old_snap);
+	}
 
 	// Queue the object but don't yet schedule the batch.
 	if (debug_rcu_head_queue(ptr)) {
@@ -3917,7 +3920,10 @@  void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 	 */
 	if (!success) {
 		debug_rcu_head_unqueue((struct rcu_head *) ptr);
-		synchronize_rcu();
+
+		if (!poll_state_synchronize_rcu_full(&old_snap))
+			synchronize_rcu();
+
 		kvfree(ptr);
 	}
 }