
io_uring: kill dead code in io_req_complete_post

Message ID 20240329154712.1936153-1-ming.lei@redhat.com (mailing list archive)
State New
Series io_uring: kill dead code in io_req_complete_post

Commit Message

Ming Lei March 29, 2024, 3:47 p.m. UTC
Since commit 8f6c829491fe ("io_uring: remove struct io_tw_state::locked"),
io_req_complete_post() is only called from io-wq submit work, where the
request reference is guaranteed to be grabbed and won't drop to zero
in io_req_complete_post().

Kill the dead code; meanwhile, add req_ref_put() to put the reference.

Cc: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 io_uring/io_uring.c | 37 ++-----------------------------------
 io_uring/refs.h     |  7 +++++++
 2 files changed, 9 insertions(+), 35 deletions(-)
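
For context on the "guaranteed to be grabbed" part: io-wq pins an extra
request reference before issuing work, and that reference is only dropped
by io-wq's free_work callback after the work item returns. A paraphrased
sketch of that path (based on io_wq_submit_work() in io_uring/io_uring.c;
not verbatim, details may differ by kernel revision):

void io_wq_submit_work(struct io_wq_work *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);

	/*
	 * One reference belongs to io-wq and is dropped by its free_work
	 * callback after this function returns, so a put from inside
	 * io_req_complete_post() can never be the final one.
	 */
	if (!(req->flags & REQ_F_REFCOUNT))
		__io_req_set_refcount(req, 2);
	else
		req_ref_get(req);

	/* ... the request is then issued ... */
}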

Comments

Pavel Begunkov April 2, 2024, 6:40 p.m. UTC | #1
On 3/29/24 15:47, Ming Lei wrote:
> Since commit 8f6c829491fe ("io_uring: remove struct io_tw_state::locked"),
> io_req_complete_post() is only called from io-wq submit work, where the
> request reference is guaranteed to be grabbed and won't drop to zero
> in io_req_complete_post().
> 
> Kill the dead code; meanwhile, add req_ref_put() to put the reference.

Interesting... a nice cleanup. The assumption is too implicit for
my taste, but should be just fine if we add

if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_IOWQ)))
	return;

at the beginning of io_req_complete_post(); it's a slow path.
And with this change:

Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>


> Cc: Pavel Begunkov <asml.silence@gmail.com>
> Signed-off-by: Ming Lei <ming.lei@redhat.com>
> ---
>   io_uring/io_uring.c | 37 ++-----------------------------------
>   io_uring/refs.h     |  7 +++++++
>   2 files changed, 9 insertions(+), 35 deletions(-)
> 
> diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
> index 104899522bc5..ac2e5da4558a 100644
> --- a/io_uring/io_uring.c
> +++ b/io_uring/io_uring.c
> @@ -929,7 +929,6 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
>   static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
>   {
>   	struct io_ring_ctx *ctx = req->ctx;
> -	struct io_rsrc_node *rsrc_node = NULL;
>   
>   	/*
>   	 * Handle special CQ sync cases via task_work. DEFER_TASKRUN requires
> @@ -946,42 +945,10 @@ static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
>   		if (!io_fill_cqe_req(ctx, req))
>   			io_req_cqe_overflow(req);
>   	}
> -
> -	/*
> -	 * If we're the last reference to this request, add to our locked
> -	 * free_list cache.
> -	 */
> -	if (req_ref_put_and_test(req)) {
> -		if (req->flags & IO_REQ_LINK_FLAGS) {
> -			if (req->flags & IO_DISARM_MASK)
> -				io_disarm_next(req);
> -			if (req->link) {
> -				io_req_task_queue(req->link);
> -				req->link = NULL;
> -			}
> -		}
> -		io_put_kbuf_comp(req);
> -		if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
> -			io_clean_op(req);
> -		io_put_file(req);
> -
> -		rsrc_node = req->rsrc_node;
> -		/*
> -		 * Selected buffer deallocation in io_clean_op() assumes that
> -		 * we don't hold ->completion_lock. Clean them here to avoid
> -		 * deadlocks.
> -		 */
> -		io_put_task_remote(req->task);
> -		wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
> -		ctx->locked_free_nr++;
> -	}
>   	io_cq_unlock_post(ctx);
>   
> -	if (rsrc_node) {
> -		io_ring_submit_lock(ctx, issue_flags);
> -		io_put_rsrc_node(ctx, rsrc_node);
> -		io_ring_submit_unlock(ctx, issue_flags);
> -	}
> +	/* called from io-wq submit work only, the ref won't drop to zero */
> +	req_ref_put(req);
>   }
>   
>   void io_req_defer_failed(struct io_kiocb *req, s32 res)
> diff --git a/io_uring/refs.h b/io_uring/refs.h
> index 1336de3f2a30..63982ead9f7d 100644
> --- a/io_uring/refs.h
> +++ b/io_uring/refs.h
> @@ -33,6 +33,13 @@ static inline void req_ref_get(struct io_kiocb *req)
>   	atomic_inc(&req->refs);
>   }
>   
> +static inline void req_ref_put(struct io_kiocb *req)
> +{
> +	WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
> +	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
> +	atomic_dec(&req->refs);
> +}
> +
>   static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
>   {
>   	if (!(req->flags & REQ_F_REFCOUNT)) {
Jens Axboe April 2, 2024, 7:59 p.m. UTC | #2
On 4/2/24 12:40 PM, Pavel Begunkov wrote:
> On 3/29/24 15:47, Ming Lei wrote:
>> Since commit 8f6c829491fe ("io_uring: remove struct io_tw_state::locked"),
>> io_req_complete_post() is only called from io-wq submit work, where the
>> request reference is guaranteed to be grabbed and won't drop to zero
>> in io_req_complete_post().
>>
>> Kill the dead code; meanwhile, add req_ref_put() to put the reference.
> 
> Interesting... a nice cleanup. The assumption is too implicit for
> my taste, but should be just fine if we add
> 
> if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_IOWQ)))
>     return;

And include a comment as to why that is there as well.
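
Putting the two suggestions together, the guard could look roughly like
this at the top of io_req_complete_post() (a sketch only, not part of the
posted patch; the comment wording is illustrative):

static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	/*
	 * Since 8f6c829491fe this is only reachable from the io-wq issue
	 * path, which always holds an extra request reference; that is
	 * what makes the plain req_ref_put() at the end of this function
	 * safe. Warn and bail if anything else ends up here.
	 */
	if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_IOWQ)))
		return;

	/* ... rest of the function unchanged ... */
}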

Patch

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 104899522bc5..ac2e5da4558a 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -929,7 +929,6 @@  bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
 static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	struct io_rsrc_node *rsrc_node = NULL;
 
 	/*
 	 * Handle special CQ sync cases via task_work. DEFER_TASKRUN requires
@@ -946,42 +945,10 @@  static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
 		if (!io_fill_cqe_req(ctx, req))
 			io_req_cqe_overflow(req);
 	}
-
-	/*
-	 * If we're the last reference to this request, add to our locked
-	 * free_list cache.
-	 */
-	if (req_ref_put_and_test(req)) {
-		if (req->flags & IO_REQ_LINK_FLAGS) {
-			if (req->flags & IO_DISARM_MASK)
-				io_disarm_next(req);
-			if (req->link) {
-				io_req_task_queue(req->link);
-				req->link = NULL;
-			}
-		}
-		io_put_kbuf_comp(req);
-		if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
-			io_clean_op(req);
-		io_put_file(req);
-
-		rsrc_node = req->rsrc_node;
-		/*
-		 * Selected buffer deallocation in io_clean_op() assumes that
-		 * we don't hold ->completion_lock. Clean them here to avoid
-		 * deadlocks.
-		 */
-		io_put_task_remote(req->task);
-		wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
-		ctx->locked_free_nr++;
-	}
 	io_cq_unlock_post(ctx);
 
-	if (rsrc_node) {
-		io_ring_submit_lock(ctx, issue_flags);
-		io_put_rsrc_node(ctx, rsrc_node);
-		io_ring_submit_unlock(ctx, issue_flags);
-	}
+	/* called from io-wq submit work only, the ref won't drop to zero */
+	req_ref_put(req);
 }
 
 void io_req_defer_failed(struct io_kiocb *req, s32 res)
diff --git a/io_uring/refs.h b/io_uring/refs.h
index 1336de3f2a30..63982ead9f7d 100644
--- a/io_uring/refs.h
+++ b/io_uring/refs.h
@@ -33,6 +33,13 @@  static inline void req_ref_get(struct io_kiocb *req)
 	atomic_inc(&req->refs);
 }
 
+static inline void req_ref_put(struct io_kiocb *req)
+{
+	WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
+	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
+	atomic_dec(&req->refs);
+}
+
 static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
 {
 	if (!(req->flags & REQ_F_REFCOUNT)) {
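
For contrast with the new helper: the existing req_ref_put_and_test() in
io_uring/refs.h (paraphrased sketch, details may differ by revision) both
drops a reference and reports whether it was the last one. req_ref_put()
deliberately skips the test because its caller must already hold another
reference, as the io-wq path does:

static inline bool req_ref_put_and_test(struct io_kiocb *req)
{
	if (likely(!(req->flags & REQ_F_REFCOUNT)))
		return true;

	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->refs);
}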