diff --git a/io_uring/net.c b/io_uring/net.c
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -403,9 +403,7 @@ static int io_sendmsg_zc_setup(struct io_kiocb *req, const struct io_uring_sqe *
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct io_async_msghdr *kmsg = req->async_data;
struct user_msghdr msg;
- int ret, iovec_off;
- struct iovec *iov;
- void *res;
+ int ret;
 
if (!(sr->flags & IORING_RECVSEND_FIXED_BUF))
return io_sendmsg_setup(req, sqe);
@@ -416,24 +414,9 @@ static int io_sendmsg_zc_setup(struct io_kiocb *req, const struct io_uring_sqe *
if (unlikely(ret))
return ret;
sr->msg_control = kmsg->msg.msg_control_user;
-
- if (msg.msg_iovlen > kmsg->vec.nr || WARN_ON_ONCE(!kmsg->vec.iovec)) {
- ret = io_vec_realloc(&kmsg->vec, msg.msg_iovlen);
- if (ret)
- return ret;
- req->flags |= REQ_F_NEED_CLEANUP;
- }
- iovec_off = kmsg->vec.nr - msg.msg_iovlen;
- iov = kmsg->vec.iovec + iovec_off;
-
- res = iovec_from_user(msg.msg_iov, msg.msg_iovlen, kmsg->vec.nr, iov,
- io_is_compat(req->ctx));
- if (IS_ERR(res))
- return PTR_ERR(res);
-
kmsg->msg.msg_iter.nr_segs = msg.msg_iovlen;
- req->flags |= REQ_F_IMPORT_BUFFER;
- return ret;
+
+ return io_prep_reg_iovec(req, &kmsg->vec, msg.msg_iov, msg.msg_iovlen);
}
 
#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE)
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -1397,3 +1397,29 @@ int io_import_reg_vec(int ddir, struct iov_iter *iter,
 
return io_vec_fill_bvec(ddir, iter, imu, iov, nr_iovs, vec);
}
+
+int io_prep_reg_iovec(struct io_kiocb *req, struct iou_vec *iv,
+ const struct iovec __user *uvec, size_t uvec_segs)
+{
+ struct iovec *iov;
+ int iovec_off, ret;
+ void *res;
+
+ if (uvec_segs > iv->nr) {
+ ret = io_vec_realloc(iv, uvec_segs);
+ if (ret)
+ return ret;
+ req->flags |= REQ_F_NEED_CLEANUP;
+ }
+
+ /* pad iovec to the right */
+ iovec_off = iv->nr - uvec_segs;
+ iov = iv->iovec + iovec_off;
+ res = iovec_from_user(uvec, uvec_segs, uvec_segs, iov,
+ io_is_compat(req->ctx));
+ if (IS_ERR(res))
+ return PTR_ERR(res);
+
+ req->flags |= REQ_F_IMPORT_BUFFER;
+ return 0;
+}
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -67,6 +67,8 @@ int io_import_reg_vec(int ddir, struct iov_iter *iter,
struct io_kiocb *req, struct iou_vec *vec,
unsigned nr_iovs, unsigned iovec_off,
unsigned issue_flags);
+int io_prep_reg_iovec(struct io_kiocb *req, struct iou_vec *iv,
+ const struct iovec __user *uvec, size_t uvec_segs);
 
int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg);
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
diff --git a/io_uring/rw.c b/io_uring/rw.c
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -407,28 +407,9 @@ static int io_rw_prep_reg_vec(struct io_kiocb *req)
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
struct io_async_rw *io = req->async_data;
const struct iovec __user *uvec;
- size_t uvec_segs = rw->len;
- struct iovec *iov;
- int iovec_off, ret;
- void *res;
 
- if (uvec_segs > io->vec.nr) {
- ret = io_vec_realloc(&io->vec, uvec_segs);
- if (ret)
- return ret;
- req->flags |= REQ_F_NEED_CLEANUP;
- }
- /* pad iovec to the right */
- iovec_off = io->vec.nr - uvec_segs;
- iov = io->vec.iovec + iovec_off;
uvec = u64_to_user_ptr(rw->addr);
- res = iovec_from_user(uvec, uvec_segs, uvec_segs, iov,
- io_is_compat(req->ctx));
- if (IS_ERR(res))
- return PTR_ERR(res);
-
- req->flags |= REQ_F_IMPORT_BUFFER;
- return 0;
+ return io_prep_reg_iovec(req, &io->vec, uvec, rw->len);
}
 
int io_prep_readv_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
iovecs that are turned into registered buffers are imported in a
special way with an offset, so that later we can do an in place
translation. Add a helper function taking care of it.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/net.c  | 23 +++--------------------
 io_uring/rsrc.c | 26 ++++++++++++++++++++++++++
 io_uring/rsrc.h |  2 ++
 io_uring/rw.c   | 21 +--------------------
 4 files changed, 32 insertions(+), 40 deletions(-)
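
A note for readers on what the offset buys us, as I understand the
scheme: the iou_vec memory is shared between the iovec view and the
bvec view, so the user iovecs are parked at the tail of the array (at
iovec_off) and the later import expands them into bvec entries written
from the head. Because every iovec expands to at least one bvec and
the array is sized for the total bvec count, the write cursor can
never pass the iovec it still has to read, which is what makes the
translation safe in place. Below is a minimal userspace model of that
invariant; it is purely illustrative, and none of its names exist in
the kernel.

/* pad_right_demo.c: illustrative model only; these names are made up
 * and this is not kernel code. Build: cc -o demo pad_right_demo.c */
#include <stdio.h>
#include <string.h>

#define TOTAL_OUT 8	/* total entries after expansion (think: bvec count) */
#define NR_SRC    3	/* source entries (think: user iovecs) */

int main(void)
{
	int buf[TOTAL_OUT];
	/* each source value says how many output entries it expands to;
	 * 3 + 1 + 4 == TOTAL_OUT, like iovecs expanding to page bvecs */
	int src[NR_SRC] = { 3, 1, 4 };
	int off = TOTAL_OUT - NR_SRC;	/* the "iovec_off" style tail offset */
	int w = 0;

	/* step 1: park the sources at the tail of the shared array */
	memcpy(&buf[off], src, sizeof(src));

	/* step 2: translate in place, writing outputs from the head */
	for (int r = 0; r < NR_SRC; r++) {
		int expand = buf[off + r];	/* read source r from the tail */

		/* writes so far <= off + r, so source r is always read
		 * before anything can overwrite its slot */
		for (int k = 0; k < expand; k++)
			buf[w++] = r;
	}

	for (int i = 0; i < TOTAL_OUT; i++)
		printf("%d ", buf[i]);	/* prints: 0 0 0 1 2 2 2 2 */
	printf("\n");
	return 0;
}

The right padding is the whole trick: parking the iovecs at the head
instead would let the expanding writes clobber entries that have not
been read yet, and avoiding that any other way would need a second
array and a copy.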