
[v1,1/1,io_uring] fix handling SQEs requesting NOWAIT

Message ID 20190427183419.5971-1-source@stbuehler.de (mailing list archive)
State Accepted, archived

Commit Message

Stefan Bühler April 27, 2019, 6:34 p.m. UTC
Not all request types set REQ_F_FORCE_NONBLOCK when they need async
punting; reverse the logic instead and set REQ_F_NOWAIT if the request
must not be punted.

Signed-off-by: Stefan Bühler <source@stbuehler.de>
---
 fs/io_uring.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)
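
For context: a request "asks for NOWAIT" by carrying RWF_NOWAIT in the
SQE's rw_flags field, which io_prep_rw() feeds to kiocb_set_rw_flags(),
yielding IOCB_NOWAIT on the kiocb. A minimal userspace sketch of such a
submission (illustrative only; raw kernel ABI without liburing, and
prep_nowait_readv is a made-up helper name):

#include <linux/io_uring.h>
#include <linux/fs.h>	/* RWF_NOWAIT */
#include <sys/uio.h>	/* struct iovec */
#include <string.h>

/* Illustrative helper (not part of the patch): fill an SQE for a
 * vectored read that must not block or be punted to a worker. */
static void prep_nowait_readv(struct io_uring_sqe *sqe, int fd,
			      const struct iovec *iov, unsigned nr_iov,
			      __u64 offset)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode   = IORING_OP_READV;
	sqe->fd       = fd;
	sqe->addr     = (unsigned long) iov;	/* pointer to iovec array */
	sqe->len      = nr_iov;			/* number of iovecs */
	sqe->off      = offset;
	sqe->rw_flags = RWF_NOWAIT;	/* becomes IOCB_NOWAIT in io_prep_rw() */
}

With the patch applied, such an SQE completes with -EAGAIN in its CQE
when the I/O cannot proceed immediately, instead of being punted to
(and potentially blocking in) a worker thread.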

Comments

Jens Axboe April 30, 2019, 3:40 p.m. UTC | #1
On 4/27/19 12:34 PM, Stefan Bühler wrote:
> Not all request types set REQ_F_FORCE_NONBLOCK when they need async
> punting; reverse the logic instead and set REQ_F_NOWAIT if the request
> must not be punted.

I like doing it this way, so we don't have to touch the other callers.
I've merged this one with my patch, no need to have two separate fixes
for it.

Patch

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 25632e399a78..77b247b5d10b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -322,7 +322,7 @@ struct io_kiocb {
 	struct list_head	list;
 	unsigned int		flags;
 	refcount_t		refs;
-#define REQ_F_FORCE_NONBLOCK	1	/* inline submission attempt */
+#define REQ_F_NOWAIT		1	/* must not punt to workers */
 #define REQ_F_IOPOLL_COMPLETED	2	/* polled IO has completed */
 #define REQ_F_FIXED_FILE	4	/* ctx owns file */
 #define REQ_F_SEQ_PREV		8	/* sequential with previous */
@@ -872,11 +872,14 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
 	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
 	if (unlikely(ret))
 		return ret;
-	/* only force async punt if the sqe didn't ask for NOWAIT */
-	if (force_nonblock && !(kiocb->ki_flags & IOCB_NOWAIT)) {
+
+	/* don't allow async punt if RWF_NOWAIT was requested */
+	if (kiocb->ki_flags & IOCB_NOWAIT)
+		req->flags |= REQ_F_NOWAIT;
+
+	if (force_nonblock)
 		kiocb->ki_flags |= IOCB_NOWAIT;
-		req->flags |= REQ_F_FORCE_NONBLOCK;
-	}
+
 	if (ctx->flags & IORING_SETUP_IOPOLL) {
 		if (!(kiocb->ki_flags & IOCB_DIRECT) ||
 		    !kiocb->ki_filp->f_op->iopoll)
@@ -1535,8 +1538,7 @@ static void io_sq_wq_submit_work(struct work_struct *work)
 		struct sqe_submit *s = &req->submit;
 		const struct io_uring_sqe *sqe = s->sqe;
 
-		/* Ensure we clear previously set forced non-block flag */
-		req->flags &= ~REQ_F_FORCE_NONBLOCK;
+		/* Ensure we clear previously set non-block flag */
 		req->rw.ki_flags &= ~IOCB_NOWAIT;
 
 		ret = 0;
@@ -1722,7 +1724,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
 		goto out;
 
 	ret = __io_submit_sqe(ctx, req, s, true);
-	if (ret == -EAGAIN && (req->flags & REQ_F_FORCE_NONBLOCK)) {
+	if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
 		struct io_uring_sqe *sqe_copy;
 
 		sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
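
The last hunk is the behavioral core of the change: an inline -EAGAIN
is now only punted when the SQE did not request NOWAIT. Condensed, the
resulting flow looks roughly like this (a paraphrase, not verbatim
kernel code; punt_to_worker() and complete_request() are stand-ins for
the actual SQE-copy/queue_work and CQE-fill steps):

ret = __io_submit_sqe(ctx, req, s, true);	/* inline attempt, IOCB_NOWAIT set */
if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
	/* Would block, and the SQE did not ask for RWF_NOWAIT: copy the
	 * SQE and punt to a worker, which retries with IOCB_NOWAIT
	 * cleared (see io_sq_wq_submit_work() above). */
	punt_to_worker(req);
} else {
	/* Success, a hard error, or -EAGAIN with REQ_F_NOWAIT set:
	 * post the result to the CQ ring as-is. */
	complete_request(req, ret);
}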