
io_uring: kbuf: kill __io_kbuf_recycle()

Message ID 20220622055551.642370-1-hao.xu@linux.dev (mailing list archive)
State New
Series io_uring: kbuf: kill __io_kbuf_recycle()

Commit Message

Hao Xu June 22, 2022, 5:55 a.m. UTC
From: Hao Xu <howeyxu@tencent.com>

__io_kbuf_recycle() is only called from io_kbuf_recycle(). Kill it and
tweak the code so that the legacy pbuf and ring pbuf code paths become clearer.

Signed-off-by: Hao Xu <howeyxu@tencent.com>
---
 io_uring/kbuf.c | 71 +++++++++++++++++++++++++++++--------------------
 io_uring/kbuf.h | 21 ++++++---------
 2 files changed, 50 insertions(+), 42 deletions(-)
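
For orientation before reading the full diff: after this change, io_kbuf_recycle() in io_uring/kbuf.h is reduced to a small dispatcher that picks the right helper per buffer mode, as the final hunk of the patch shows:

static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BUFFER_SELECTED)
		io_kbuf_recycle_legacy(req, issue_flags);

	if (req->flags & REQ_F_BUFFER_RING)
		io_kbuf_recycle_ring(req);
}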

Comments

Jens Axboe June 22, 2022, 5:48 p.m. UTC | #1
On 6/21/22 11:55 PM, Hao Xu wrote:
> From: Hao Xu <howeyxu@tencent.com>
> 
> __io_kbuf_recycle() is only called from io_kbuf_recycle(). Kill it and
> tweak the code so that the legacy pbuf and ring pbuf code paths become clearer.

I have applied this one as I think it makes sense separately, but I'd
really like to see the ring provided buffer recycling done inline, as
that is the fast path for provided buffers (and it's very few instructions).
Care to do a patch on top for that?
Jens Axboe June 22, 2022, 10:27 p.m. UTC | #2
On Wed, 22 Jun 2022 13:55:51 +0800, Hao Xu wrote:
> From: Hao Xu <howeyxu@tencent.com>
> 
> __io_kbuf_recycle() is only called from io_kbuf_recycle(). Kill it and
> tweak the code so that the legacy pbuf and ring pbuf code paths become clearer.
> 
> 

Applied, thanks!

[1/1] io_uring: kbuf: kill __io_kbuf_recycle()
      commit: b4ef7c36b5ca6a0b96c8b493c495b17a0884fd11

Best regards,
Jens Axboe
Hao Xu June 23, 2022, 7:18 a.m. UTC | #3
On 6/23/22 01:48, Jens Axboe wrote:
> On 6/21/22 11:55 PM, Hao Xu wrote:
>> From: Hao Xu <howeyxu@tencent.com>
>>
>> __io_kbuf_recycle() is only called from io_kbuf_recycle(). Kill it and
>> tweak the code so that the legacy pbuf and ring pbuf code paths become clearer.
> 
> I have applied this one as I think it makes sense separately, but I'd
> really like to see the ring provided buffer recycling done inline, as
> that is the fast path for provided buffers (and it's very few instructions).
> Care to do a patch on top for that?
> 

No problem.
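
For illustration, one shape that follow-up could take, assuming io_kbuf_recycle_ring() is simply lifted from io_uring/kbuf.c into io_uring/kbuf.h as a static inline (a sketch only, not the actual follow-up patch; the body follows the version added in the patch below):

static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io, that case we should increment bl->head
	 * to monopolize the buffer.
	 */
	if (req->buf_list) {
		if (req->flags & REQ_F_PARTIAL_IO) {
			/* the io_uring lock has been kept held since the buffer was picked */
			req->buf_list->head++;
			req->buf_list = NULL;
		} else {
			req->buf_index = req->buf_list->bgid;
			req->flags &= ~REQ_F_BUFFER_RING;
		}
	}
}

Keeping the ring variant in the header would let the compiler inline the common REQ_F_BUFFER_RING case directly into io_kbuf_recycle()'s callers, which is what matters for the provided-buffer fast path mentioned above.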

Patch

diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index e4ee11cd337c..4b7f2aa99e38 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -37,36 +37,30 @@  static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
 	return xa_load(&ctx->io_bl_xa, bgid);
 }
 
-void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
+static int io_buffer_add_list(struct io_ring_ctx *ctx,
+			      struct io_buffer_list *bl, unsigned int bgid)
+{
+	bl->bgid = bgid;
+	if (bgid < BGID_ARRAY)
+		return 0;
+
+	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
+}
+
+void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_buffer_list *bl;
 	struct io_buffer *buf;
 
 	/*
-	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
-	 * the flag and hence ensure that bl->head doesn't get incremented.
-	 * If the tail has already been incremented, hang on to it.
+	 * For legacy provided buffer mode, don't recycle if we already did
+	 * IO to this buffer. For ring-mapped provided buffer mode, we should
+	 * increment ring->head to explicitly monopolize the buffer to avoid
+	 * multiple use.
 	 */
-	if (req->flags & REQ_F_BUFFER_RING) {
-		if (req->buf_list) {
-			if (req->flags & REQ_F_PARTIAL_IO) {
-				/*
-				 * If we end up here, then the io_uring_lock has
-				 * been kept held since we retrieved the buffer.
-				 * For the io-wq case, we already cleared
-				 * req->buf_list when the buffer was retrieved,
-				 * hence it cannot be set here for that case.
-				 */
-				req->buf_list->head++;
-				req->buf_list = NULL;
-			} else {
-				req->buf_index = req->buf_list->bgid;
-				req->flags &= ~REQ_F_BUFFER_RING;
-			}
-		}
+	if (req->flags & REQ_F_PARTIAL_IO)
 		return;
-	}
 
 	io_ring_submit_lock(ctx, issue_flags);
 
@@ -77,16 +71,35 @@  void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 	req->buf_index = buf->bgid;
 
 	io_ring_submit_unlock(ctx, issue_flags);
+	return;
 }
 
-static int io_buffer_add_list(struct io_ring_ctx *ctx,
-			      struct io_buffer_list *bl, unsigned int bgid)
+void io_kbuf_recycle_ring(struct io_kiocb *req)
 {
-	bl->bgid = bgid;
-	if (bgid < BGID_ARRAY)
-		return 0;
-
-	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
+	/*
+	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
+	 * the flag and hence ensure that bl->head doesn't get incremented.
+	 * If the tail has already been incremented, hang on to it.
+	 * The exception is partial io, that case we should increment bl->head
+	 * to monopolize the buffer.
+	 */
+	if (req->buf_list) {
+		if (req->flags & REQ_F_PARTIAL_IO) {
+			/*
+			 * If we end up here, then the io_uring_lock has
+			 * been kept held since we retrieved the buffer.
+			 * For the io-wq case, we already cleared
+			 * req->buf_list when the buffer was retrieved,
+			 * hence it cannot be set here for that case.
+			 */
+			req->buf_list->head++;
+			req->buf_list = NULL;
+		} else {
+			req->buf_index = req->buf_list->bgid;
+			req->flags &= ~REQ_F_BUFFER_RING;
+		}
+	}
+	return;
 }
 
 unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index 5da3d4039aed..b5a89ffadf31 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -35,7 +35,6 @@  struct io_buffer {
 
 void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
 			      unsigned int issue_flags);
-void __io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags);
 void io_destroy_buffers(struct io_ring_ctx *ctx);
 
 int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
@@ -49,6 +48,9 @@  int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
 
 unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
 
+void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
+void io_kbuf_recycle_ring(struct io_kiocb *req);
+
 static inline bool io_do_buffer_select(struct io_kiocb *req)
 {
 	if (!(req->flags & REQ_F_BUFFER_SELECT))
@@ -58,18 +60,11 @@  static inline bool io_do_buffer_select(struct io_kiocb *req)
 
 static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 {
-	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
-		return;
-	/*
-	 * For legacy provided buffer mode, don't recycle if we already did
-	 * IO to this buffer. For ring-mapped provided buffer mode, we should
-	 * increment ring->head to explicitly monopolize the buffer to avoid
-	 * multiple use.
-	 */
-	if ((req->flags & REQ_F_BUFFER_SELECTED) &&
-	    (req->flags & REQ_F_PARTIAL_IO))
-		return;
-	__io_kbuf_recycle(req, issue_flags);
+	if (req->flags & REQ_F_BUFFER_SELECTED)
+		io_kbuf_recycle_legacy(req, issue_flags);
+
+	if (req->flags & REQ_F_BUFFER_RING)
+		io_kbuf_recycle_ring(req);
 }
 
 static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,