
[v4,4/6] io_uring/napi: Use lock guards

Message ID: 2680ca47ee183cfdb89d1a40c84d349edeb620ab.1728828877.git.olivier@trillion01.com
State: New
Series: napi tracking strategy

Commit Message

Olivier Langlois Oct. 13, 2024, 6:29 p.m. UTC
Convert napi locks to use the shiny new Scope-Based Resource Management
machinery.
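
The guard machinery lives in <linux/cleanup.h>: the lock is taken when
the guard is declared and released automatically on every scope exit,
including early returns. A minimal sketch of the two forms used below
(the bodies are illustrative, not the exact io_uring code):

	/* held until the end of the enclosing scope */
	guard(spinlock)(&ctx->napi_lock);

	/* held for this block only; the early return still runs
	 * rcu_read_unlock() automatically
	 */
	scoped_guard(rcu) {
		e = io_napi_hash_find(hash_list, napi_id);
		if (e)
			return -EEXIST;
	}

This is what makes the early return in __io_napi_add_id() safe without
an explicit rcu_read_unlock(). Conversely, guard(spinlock) is not used
for the second critical section of that function, whose error path must
drop the lock by hand before calling kfree().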

Signed-off-by: Olivier Langlois <olivier@trillion01.com>
---
 io_uring/napi.c | 40 +++++++++++++++++++++-------------------
 1 file changed, 21 insertions(+), 19 deletions(-)

Patch

diff --git a/io_uring/napi.c b/io_uring/napi.c
index 5e2299e7ff8e..6d5fdd397f2f 100644
--- a/io_uring/napi.c
+++ b/io_uring/napi.c
@@ -49,14 +49,13 @@  int __io_napi_add_id(struct io_ring_ctx *ctx, unsigned int napi_id)
 
 	hash_list = &ctx->napi_ht[hash_min(napi_id, HASH_BITS(ctx->napi_ht))];
 
-	rcu_read_lock();
-	e = io_napi_hash_find(hash_list, napi_id);
-	if (e) {
-		WRITE_ONCE(e->timeout, jiffies + NAPI_TIMEOUT);
-		rcu_read_unlock();
-		return -EEXIST;
+	scoped_guard(rcu) {
+		e = io_napi_hash_find(hash_list, napi_id);
+		if (e) {
+			WRITE_ONCE(e->timeout, jiffies + NAPI_TIMEOUT);
+			return -EEXIST;
+		}
 	}
-	rcu_read_unlock();
 
 	e = kmalloc(sizeof(*e), GFP_NOWAIT);
 	if (!e)
@@ -65,6 +64,10 @@  int __io_napi_add_id(struct io_ring_ctx *ctx, unsigned int napi_id)
 	e->napi_id = napi_id;
 	e->timeout = jiffies + NAPI_TIMEOUT;
 
+	/*
+	 * guard(spinlock) is not used here so that the lock can be dropped
+	 * manually before calling kfree()
+	 */
 	spin_lock(&ctx->napi_lock);
 	if (unlikely(io_napi_hash_find(hash_list, napi_id))) {
 		spin_unlock(&ctx->napi_lock);
@@ -82,7 +85,7 @@  static void __io_napi_remove_stale(struct io_ring_ctx *ctx)
 {
 	struct io_napi_entry *e;
 
-	spin_lock(&ctx->napi_lock);
+	guard(spinlock)(&ctx->napi_lock);
 	/*
 	 * list_for_each_entry_safe() is not required as long as:
 	 * 1. list_del_rcu() does not reset the deleted node next pointer
@@ -96,7 +99,6 @@  static void __io_napi_remove_stale(struct io_ring_ctx *ctx)
 			kfree_rcu(e, rcu);
 		}
 	}
-	spin_unlock(&ctx->napi_lock);
 }
 
 static inline void io_napi_remove_stale(struct io_ring_ctx *ctx, bool is_stale)
@@ -168,11 +170,12 @@  static void io_napi_blocking_busy_loop(struct io_ring_ctx *ctx,
 	if (list_is_singular(&ctx->napi_list))
 		loop_end_arg = iowq;
 
-	rcu_read_lock();
-	do {
-		is_stale = __io_napi_do_busy_loop(ctx, loop_end_arg);
-	} while (!io_napi_busy_loop_should_end(iowq, start_time) && !loop_end_arg);
-	rcu_read_unlock();
+	scoped_guard(rcu) {
+		do {
+			is_stale = __io_napi_do_busy_loop(ctx, loop_end_arg);
+		} while (!io_napi_busy_loop_should_end(iowq, start_time) &&
+			 !loop_end_arg);
+	}
 
 	io_napi_remove_stale(ctx, is_stale);
 }
@@ -203,13 +206,12 @@  void io_napi_free(struct io_ring_ctx *ctx)
 {
 	struct io_napi_entry *e;
 
-	spin_lock(&ctx->napi_lock);
+	guard(spinlock)(&ctx->napi_lock);
 	list_for_each_entry(e, &ctx->napi_list, list) {
 		hash_del_rcu(&e->node);
 		kfree_rcu(e, rcu);
 	}
 	INIT_LIST_HEAD_RCU(&ctx->napi_list);
-	spin_unlock(&ctx->napi_lock);
 }
 
 /*
@@ -305,9 +307,9 @@  int io_napi_sqpoll_busy_poll(struct io_ring_ctx *ctx)
 	if (list_empty_careful(&ctx->napi_list))
 		return 0;
 
-	rcu_read_lock();
-	is_stale = __io_napi_do_busy_loop(ctx, NULL);
-	rcu_read_unlock();
+	scoped_guard(rcu) {
+		is_stale = __io_napi_do_busy_loop(ctx, NULL);
+	}
 
 	io_napi_remove_stale(ctx, is_stale);
 	return 1;