
[RFC,1/2] io_uring: create io_queue_async() function

Message ID 1571908688-22488-2-git-send-email-bijan.mottahedeh@oracle.com (mailing list archive)
State New, archived
Series io_uring: examine request result only after completion

Commit Message

Bijan Mottahedeh Oct. 24, 2019, 9:18 a.m. UTC
This patch pulls the async-punt code out of __io_queue_sqe() and into a new
io_queue_async() function.  There is no change in runtime behavior, but the
code is a bit cleaner and will be used in future patches.

Signed-off-by: Bijan Mottahedeh <bijan.mottahedeh@oracle.com>
Reviewed-by: Dan Carpenter <dan.carpenter@oracle.com>
---
 fs/io_uring.c | 59 +++++++++++++++++++++++++++++++++++------------------------
 1 file changed, 35 insertions(+), 24 deletions(-)
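
A note for readers comparing the before/after hunks: io_queue_async() returns
0 when the request was queued and -ENOMEM when the sqe copy cannot be
allocated, so the caller folds it into the existing punt condition with a
short-circuiting &&, and an allocation failure still falls through to the
synchronous path exactly as before.  The standalone sketch below illustrates
only that control-flow pattern; it is not kernel code, and queue_async(),
submit() and struct request are made-up names for illustration.

/*
 * Standalone illustration (not kernel code) of the refactoring pattern in
 * this patch: an inline "copy and punt, fall through on allocation failure"
 * block becomes a helper that returns 0 on success and -ENOMEM on failure,
 * which the caller short-circuits inside the existing condition.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct request {
	const char *payload;
};

/* Copy the payload and "queue" it; 0 on success, -ENOMEM on failure. */
static int queue_async(struct request *req)
{
	char *copy = strdup(req->payload);

	if (!copy)
		return -ENOMEM;
	printf("queued async: %s\n", copy);
	free(copy);
	return 0;
}

/* Mirrors the reworked __io_queue_sqe() shape: punt only when the helper
 * succeeds; on -ENOMEM fall through to the synchronous path, as before. */
static int submit(struct request *req, int ret)
{
	if (ret == -EAGAIN && !queue_async(req))
		return 0;

	printf("completed synchronously (ret=%d)\n", ret);
	return ret;
}

int main(void)
{
	struct request req = { .payload = "example" };

	submit(&req, -EAGAIN);	/* takes the async-punt path */
	submit(&req, 0);	/* stays on the synchronous path */
	return 0;
}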

Patch

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5415fcc..acb213c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -362,6 +362,8 @@ struct io_submit_state {
 static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
 				 long res);
 static void __io_free_req(struct io_kiocb *req);
+static int io_queue_async(struct io_ring_ctx *ctx, struct io_kiocb *req,
+			  struct sqe_submit *s);
 
 static struct kmem_cache *req_cachep;
 
@@ -2437,6 +2439,35 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
 	return 0;
 }
 
+static int io_queue_async(struct io_ring_ctx *ctx, struct io_kiocb *req,
+			  struct sqe_submit *s)
+{
+	struct io_uring_sqe *sqe_copy;
+	struct async_list *list;
+
+	/* async context always use a copy of the sqe */
+	sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
+	if (!sqe_copy)
+		return -ENOMEM;
+
+	s->sqe = sqe_copy;
+
+	memcpy(&req->submit, s, sizeof(*s));
+	list = io_async_list_from_sqe(ctx, s->sqe);
+	if (!io_add_to_prev_work(list, req)) {
+		if (list)
+			atomic_inc(&list->cnt);
+		INIT_WORK(&req->work, io_sq_wq_submit_work);
+		io_queue_async_work(ctx, req);
+	}
+
+	/*
+	 * Queued up for async execution, worker will release
+	 * submit reference when the iocb is actually submitted.
+	 */
+	return 0;
+}
+
 static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 			struct sqe_submit *s, bool force_nonblock)
 {
@@ -2448,30 +2479,10 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
 	 * doesn't support non-blocking read/write attempts
 	 */
-	if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
-	    (req->flags & REQ_F_MUST_PUNT))) {
-		struct io_uring_sqe *sqe_copy;
-
-		sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
-		if (sqe_copy) {
-			struct async_list *list;
-
-			s->sqe = sqe_copy;
-			memcpy(&req->submit, s, sizeof(*s));
-			list = io_async_list_from_sqe(ctx, s->sqe);
-			if (!io_add_to_prev_work(list, req)) {
-				if (list)
-					atomic_inc(&list->cnt);
-				INIT_WORK(&req->work, io_sq_wq_submit_work);
-				io_queue_async_work(ctx, req);
-			}
-
-			/*
-			 * Queued up for async execution, worker will release
-			 * submit reference when the iocb is actually submitted.
-			 */
-			return 0;
-		}
+	if (ret == -EAGAIN &&
+	    (!(req->flags & REQ_F_NOWAIT) || (req->flags & REQ_F_MUST_PUNT)) &&
+	    !io_queue_async(ctx, req, s)) {
+		return 0;
 	}
 
 	/* drop submission reference */