
[v2] io_uring: kbuf: inline io_kbuf_recycle_ring()

Message ID 20220623130126.179232-1-hao.xu@linux.dev (mailing list archive)
State New
Series [v2] io_uring: kbuf: inline io_kbuf_recycle_ring()

Commit Message

Hao Xu June 23, 2022, 1:01 p.m. UTC
From: Hao Xu <howeyxu@tencent.com>

Make io_kbuf_recycle_ring() inline since it is on the fast path for
provided buffers.

Signed-off-by: Hao Xu <howeyxu@tencent.com>
---
 io_uring/kbuf.c | 28 ----------------------------
 io_uring/kbuf.h | 28 +++++++++++++++++++++++++++-
 2 files changed, 27 insertions(+), 29 deletions(-)


base-commit: 5ec69c3a15ae6e904d76545d9a9c686eb758def0
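
For context, the caller that makes this the hot path is the io_kbuf_recycle()
dispatcher in io_uring/kbuf.h. A minimal sketch for illustration (assuming the
wrapper roughly as it exists around this series; it is not part of this patch):

static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	/* classic (legacy) provided buffers keep the out-of-line path */
	if (req->flags & REQ_F_BUFFER_SELECTED)
		io_kbuf_recycle_legacy(req, issue_flags);
	/* ring-mapped provided buffers take the now-inlined fast path */
	if (req->flags & REQ_F_BUFFER_RING)
		io_kbuf_recycle_ring(req);
}

With io_kbuf_recycle_ring() defined in the header, the ring-buffer recycle can be
inlined into its callers instead of costing a function call into kbuf.c.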

Comments

Jens Axboe June 23, 2022, 1:59 p.m. UTC | #1
On Thu, 23 Jun 2022 21:01:26 +0800, Hao Xu wrote:
> From: Hao Xu <howeyxu@tencent.com>
> 
> Make io_kbuf_recycle_ring() inline since it is on the fast path for
> provided buffers.
> 
> 

Applied, thanks!

[1/1] io_uring: kbuf: inline io_kbuf_recycle_ring()
      commit: 0c7e8750e623d48fb39f1284fe77299edbdbc2bc

Best regards,

Patch

diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 4b7f2aa99e38..8e4f1e8aaf4a 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -74,34 +74,6 @@  void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 	return;
 }
 
-void io_kbuf_recycle_ring(struct io_kiocb *req)
-{
-	/*
-	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
-	 * the flag and hence ensure that bl->head doesn't get incremented.
-	 * If the tail has already been incremented, hang on to it.
-	 * The exception is partial io, that case we should increment bl->head
-	 * to monopolize the buffer.
-	 */
-	if (req->buf_list) {
-		if (req->flags & REQ_F_PARTIAL_IO) {
-			/*
-			 * If we end up here, then the io_uring_lock has
-			 * been kept held since we retrieved the buffer.
-			 * For the io-wq case, we already cleared
-			 * req->buf_list when the buffer was retrieved,
-			 * hence it cannot be set here for that case.
-			 */
-			req->buf_list->head++;
-			req->buf_list = NULL;
-		} else {
-			req->buf_index = req->buf_list->bgid;
-			req->flags &= ~REQ_F_BUFFER_RING;
-		}
-	}
-	return;
-}
-
 unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
 {
 	unsigned int cflags;
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index b5a89ffadf31..3d48f1ab5439 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -49,7 +49,33 @@  int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
 unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
 
 void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
-void io_kbuf_recycle_ring(struct io_kiocb *req);
+
+static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
+{
+	/*
+	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
+	 * the flag and hence ensure that bl->head doesn't get incremented.
+	 * If the tail has already been incremented, hang on to it.
+	 * The exception is partial io, that case we should increment bl->head
+	 * to monopolize the buffer.
+	 */
+	if (req->buf_list) {
+		if (req->flags & REQ_F_PARTIAL_IO) {
+			/*
+			 * If we end up here, then the io_uring_lock has
+			 * been kept held since we retrieved the buffer.
+			 * For the io-wq case, we already cleared
+			 * req->buf_list when the buffer was retrieved,
+			 * hence it cannot be set here for that case.
+			 */
+			req->buf_list->head++;
+			req->buf_list = NULL;
+		} else {
+			req->buf_index = req->buf_list->bgid;
+			req->flags &= ~REQ_F_BUFFER_RING;
+		}
+	}
+}
 
 static inline bool io_do_buffer_select(struct io_kiocb *req)
 {