@@ -880,7 +880,15 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
if (unlikely(ret))
return ret;
- ret = io_iter_do_read(rw, &io->iter);
+ if (unlikely(req->opcode == IORING_OP_READ_MULTISHOT)) {
+ void *cb_copy = rw->kiocb.ki_complete;
+
+ rw->kiocb.ki_complete = NULL;
+ ret = io_iter_do_read(rw, &io->iter);
+ rw->kiocb.ki_complete = cb_copy;
+ } else {
+ ret = io_iter_do_read(rw, &io->iter);
+ }
/*
* Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT
@@ -904,7 +912,8 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
} else if (ret == -EIOCBQUEUED) {
return IOU_ISSUE_SKIP_COMPLETE;
} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
- (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
+ (req->flags & REQ_F_NOWAIT) || !need_complete_io(req) ||
+ (issue_flags & IO_URING_F_MULTISHOT)) {
/* read all, failed, already did sync or don't want to retry */
goto done;
}
At the moment we can't sanely handle queuing an async request from a
multishot context, so disable them. It shouldn't matter as pollable
files / sockets don't normally do async.

Patching it in __io_read() is not the cleanest way, but it's simpler
than other options, so let's fix it there and clean up on top.

Cc: stable@vger.kernel.org
Reported-by: chase xd <sl1589472800@gmail.com>
Fixes: fc68fcda04910 ("io_uring/rw: add support for IORING_OP_READ_MULTISHOT")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/rw.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
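
For illustration only, not part of the patch: below is a minimal userspace
sketch of the save/clear/restore pattern used in the __io_read() hunk.
The mock_kiocb, mock_do_read and mock_complete names are made up for the
example and are not kernel APIs. The idea it demonstrates is that the
kernel's is_sync_kiocb() treats a kiocb whose ->ki_complete is NULL as
synchronous, so temporarily clearing the callback around the read keeps
the lower layers from queuing the request asynchronously.

/*
 * Userspace sketch with mock types: why clearing the completion
 * callback forces the synchronous path.
 */
#include <stdio.h>
#include <stddef.h>

struct mock_kiocb {
	/* completion callback; NULL means "synchronous", as in is_sync_kiocb() */
	void (*ki_complete)(struct mock_kiocb *kiocb, long res);
};

/* Mirrors the kernel's is_sync_kiocb(): no callback means sync I/O. */
static int mock_is_sync_kiocb(const struct mock_kiocb *kiocb)
{
	return kiocb->ki_complete == NULL;
}

/* Stand-in for the read path: completes inline when the kiocb is sync. */
static long mock_do_read(struct mock_kiocb *kiocb)
{
	if (mock_is_sync_kiocb(kiocb))
		return 0;	/* completed inline */
	return -1;		/* would have been queued async */
}

static void mock_complete(struct mock_kiocb *kiocb, long res)
{
	(void)kiocb;
	printf("async completion, res=%ld\n", res);
}

int main(void)
{
	struct mock_kiocb kiocb = { .ki_complete = mock_complete };

	/* Save, clear, issue, restore -- same shape as the __io_read() change. */
	void (*cb_copy)(struct mock_kiocb *, long) = kiocb.ki_complete;

	kiocb.ki_complete = NULL;
	long ret = mock_do_read(&kiocb);
	kiocb.ki_complete = cb_copy;

	printf("read returned %ld (inline, never queued async)\n", ret);
	return 0;
}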