@@ -690,10 +690,16 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return 0;
 }
 
-static inline void io_recv_prep_retry(struct io_kiocb *req)
+static inline void io_recv_prep_retry(struct io_kiocb *req,
+				       struct io_async_msghdr *kmsg)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 
+	if (kmsg->free_iov) {
+		kfree(kmsg->free_iov);
+		kmsg->free_iov = NULL;
+	}
+
 	req->flags &= ~REQ_F_BL_EMPTY;
 	sr->done_io = 0;
 	sr->len = 0; /* get from the provided buffer */
@@ -725,7 +731,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 		struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 		int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
 
-		io_recv_prep_retry(req);
+		io_recv_prep_retry(req, kmsg);
 		/* Known not-empty or unknown state, retry */
 		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
 			if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
@@ -734,10 +740,9 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 			sr->nr_multishot_loops = 0;
 			mshot_retry_ret = IOU_REQUEUE;
 		}
-		if (issue_flags & IO_URING_F_MULTISHOT)
+		*ret = io_setup_async_msg(req, kmsg, issue_flags);
+		if (*ret == -EAGAIN && issue_flags & IO_URING_F_MULTISHOT)
 			*ret = mshot_retry_ret;
-		else
-			*ret = -EAGAIN;
 		return true;
 	}
 
@@ -748,6 +753,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 		*ret = IOU_STOP_MULTISHOT;
 	else
 		*ret = IOU_OK;
+	io_req_msg_cleanup(req, kmsg, issue_flags);
 	return true;
 }
 
@@ -931,11 +937,6 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
 		goto retry_multishot;
 
-	if (mshot_finished)
-		io_req_msg_cleanup(req, kmsg, issue_flags);
-	else if (ret == -EAGAIN)
-		return io_setup_async_msg(req, kmsg, issue_flags);
-
 	return ret;
 }
 
@@ -1037,11 +1038,6 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 	if (!io_recv_finish(req, &ret, kmsg, ret <= 0, issue_flags))
 		goto retry_multishot;
 
-	if (ret == -EAGAIN)
-		return io_setup_async_msg(req, kmsg, issue_flags);
-	else if (ret != IOU_OK && ret != IOU_STOP_MULTISHOT)
-		io_req_msg_cleanup(req, kmsg, issue_flags);
-
 	return ret;
 }
 
Now that recv/recvmsg both do the same cleanup, put it in the retry and
finish handlers.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 io_uring/net.c | 26 +++++++++++---------------
 1 file changed, 11 insertions(+), 15 deletions(-)
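
As a quick illustration of the shape this leaves the receive paths in, here is
a minimal userspace sketch (not kernel code): a single finish helper owns
freeing the iovec and clearing the async msghdr state, and both receive
handlers simply delegate to it. All names below (toy_msghdr, toy_finish,
toy_recv, toy_recvmsg) are invented for the example and are not io_uring APIs.

/*
 * Toy model of the pattern in this patch: both receive paths funnel
 * through one finish helper that owns the cleanup of the async msghdr
 * state, instead of each caller freeing it under its own conditions.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_msghdr {
	void *free_iov;	/* heap-allocated iovec, may be NULL */
};

/* Single finish handler: the one place that releases per-request state. */
static int toy_finish(struct toy_msghdr *kmsg, int ret)
{
	free(kmsg->free_iov);
	kmsg->free_iov = NULL;
	return ret;
}

/* Both entry points delegate cleanup instead of duplicating it. */
static int toy_recv(struct toy_msghdr *kmsg, int res)
{
	return toy_finish(kmsg, res);
}

static int toy_recvmsg(struct toy_msghdr *kmsg, int res)
{
	return toy_finish(kmsg, res);
}

int main(void)
{
	struct toy_msghdr kmsg = { .free_iov = malloc(64) };

	printf("recv    -> %d\n", toy_recv(&kmsg, 16));
	kmsg.free_iov = malloc(64);
	printf("recvmsg -> %d\n", toy_recvmsg(&kmsg, 32));
	return 0;
}

The same reasoning applies in the patch: once both callers rely on the shared
helper, the per-caller -EAGAIN and cleanup branches in io_recvmsg()/io_recv()
can go away.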