
[2/2] io_uring: replace s->needs_lock with s->in_async

Message ID 20191009011959.2203-2-liuyun01@kylinos.cn (mailing list archive)
State New, archived
Series [1/2] io_uring: make the logic clearer for io_sequence_defer

Commit Message

Jackie Liu Oct. 9, 2019, 1:19 a.m. UTC
There is no functional change; this is just a cleanup. Rename
s->needs_lock to s->in_async so the flag describes the context the
request is running in (async or not) instead of implying a locking
requirement.

Signed-off-by: Jackie Liu <liuyun01@kylinos.cn>
---
 fs/io_uring.c | 27 +++++++++------------------
 1 file changed, 9 insertions(+), 18 deletions(-)

Comments

Jens Axboe Oct. 10, 2019, 4:10 p.m. UTC | #1
On 10/8/19 7:19 PM, Jackie Liu wrote:
> There is no functional change; this is just a cleanup. Rename
> s->needs_lock to s->in_async so the flag describes the context the
> request is running in (async or not) instead of implying a locking
> requirement.

This seems to be a somewhat older code base, as it doesn't have
the changes done post 5.3 (io_rw_done -> kiocb_done, for example).
I hand applied it, thanks.
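
For context, here is a minimal sketch of the semantics the rename captures
(simplified from the hunks below; the struct and helper names are made up
for illustration, not taken from the kernel source). The flag marks
requests issued from the async workqueue or the SQ poll thread, neither of
which holds ctx->uring_lock, as opposed to the inline io_uring_enter()
submission path, which does:

	struct sqe_submit_sketch {
		bool in_async;	/* issued from async workqueue / SQ thread */
	};

	/* Hypothetical helper mirroring the __io_submit_sqe() hunk below. */
	static void issue_for_iopoll_sketch(struct io_ring_ctx *ctx,
					    struct io_kiocb *req,
					    const struct sqe_submit_sketch *s)
	{
		/*
		 * Inline submission already holds ctx->uring_lock; the
		 * async paths do not, so take the mutex around the iopoll
		 * bookkeeping.
		 */
		if (s->in_async)
			mutex_lock(&ctx->uring_lock);
		io_iopoll_req_issued(req);
		if (s->in_async)
			mutex_unlock(&ctx->uring_lock);
	}

In the patch itself, in_async is set true in io_sq_wq_submit_work() and in
the SQ thread path (io_submit_sqes()), and false in io_ring_submit(),
matching those contexts.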

Patch

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8ec2443eb019..3bb638b26cb7 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -268,7 +268,7 @@ struct sqe_submit {
 	unsigned short			index;
 	u32				sequence;
 	bool				has_user;
-	bool				needs_lock;
+	bool				in_async;
 	bool				needs_fixed_file;
 };
 
@@ -1390,11 +1390,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
 		if (!force_nonblock || ret2 != -EAGAIN) {
 			io_rw_done(kiocb, ret2);
 		} else {
-			/*
-			 * If ->needs_lock is true, we're already in async
-			 * context.
-			 */
-			if (!s->needs_lock)
+			if (!s->in_async)
 				io_async_list_note(READ, req, iov_count);
 			ret = -EAGAIN;
 		}
@@ -1432,8 +1428,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
 
 	ret = -EAGAIN;
 	if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT)) {
-		/* If ->needs_lock is true, we're already in async context. */
-		if (!s->needs_lock)
+		if (!s->in_async)
 			io_async_list_note(WRITE, req, iov_count);
 		goto out_free;
 	}
@@ -1464,11 +1459,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
 		if (!force_nonblock || ret2 != -EAGAIN) {
 			io_rw_done(kiocb, ret2);
 		} else {
-			/*
-			 * If ->needs_lock is true, we're already in async
-			 * context.
-			 */
-			if (!s->needs_lock)
+			if (!s->in_async)
 				io_async_list_note(WRITE, req, iov_count);
 			ret = -EAGAIN;
 		}
@@ -2029,10 +2020,10 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 			return -EAGAIN;
 
 		/* workqueue context doesn't hold uring_lock, grab it now */
-		if (s->needs_lock)
+		if (s->in_async)
 			mutex_lock(&ctx->uring_lock);
 		io_iopoll_req_issued(req);
-		if (s->needs_lock)
+		if (s->in_async)
 			mutex_unlock(&ctx->uring_lock);
 	}
 
@@ -2096,7 +2087,7 @@ static void io_sq_wq_submit_work(struct work_struct *work)
 
 		if (!ret) {
 			s->has_user = cur_mm != NULL;
-			s->needs_lock = true;
+			s->in_async = true;
 			do {
 				ret = __io_submit_sqe(ctx, req, s, false);
 				/*
@@ -2552,7 +2543,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
 						-EFAULT);
 		} else {
 			sqes[i].has_user = has_user;
-			sqes[i].needs_lock = true;
+			sqes[i].in_async = true;
 			sqes[i].needs_fixed_file = true;
 			io_submit_sqe(ctx, &sqes[i], statep, &link, true);
 			submitted++;
@@ -2738,7 +2729,7 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
 
 out:
 		s.has_user = true;
-		s.needs_lock = false;
+		s.in_async = false;
 		s.needs_fixed_file = false;
 		submit++;