[7/7] io_uring/net: add provided buffer and bundle support to send zc

Message ID 20241023161522.1126423-8-axboe@kernel.dk
State New
Series Add support for provided registered buffers

Commit Message

Jens Axboe Oct. 23, 2024, 4:07 p.m. UTC
Provided buffers tell the kernel which buffer group ID to pick a
buffer from for a transfer. Normally that buffer contains the usual
addr + length information, as well as a buffer ID that is passed back
at completion time to tell the application which buffer was used for
the transfer.
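
For reference, this is the normal provided-buffer descriptor from the
io_uring UAPI (struct io_uring_buf in include/uapi/linux/io_uring.h):

	struct io_uring_buf {
		__u64	addr;	/* buffer address */
		__u32	len;	/* buffer length */
		__u16	bid;	/* buffer ID, passed back in the CQE */
		__u16	resv;
	};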

However, if registered and provided buffers are combined, then the
provided buffer must instead tell the kernel which registered buffer
index should be used, along with the length and offset within that
buffer. Rather than the usual addr + length, the application must
store this information in the provided buffer instead.
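
A purely hypothetical sketch of how an application might pack that
into the same descriptor. The patch does not define the exact field
mapping, so the assignments below are assumptions for illustration
only:

	/* Hypothetical encoding: addr carries an offset into the
	 * registered buffer rather than a pointer, and bid carries the
	 * registered buffer index. buf points at an io_uring_buf in
	 * the application's buffer ring. */
	buf->addr = offset_in_registered_buf;
	buf->len  = xfer_len;
	buf->bid  = registered_buf_index;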

If provided buffers are used with send zc, then those buffers must be
an index into a registered buffer. Change the mapping type to use
KBUF_MODE_BVEC, which tells the kbuf handlers to turn the mappings
into bio_vecs rather than iovecs. Then all that is needed is to set
up our iov_iter with iov_iter_bvec().
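
As a sketch of that end state, the bvec path boils down to a single
iov_iter_bvec() call, mirroring what the patch below does in
__io_send_zc_import(), where bv and nsegs come from the kbuf
selection:

	/* Point the send iterator at the selected bio_vec array */
	iov_iter_bvec(&kmsg->msg.msg_iter, ITER_SOURCE, bv, nsegs, sr->len);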

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 io_uring/net.c   | 64 +++++++++++++++++++++++++++++++++---------------
 io_uring/net.h   | 10 ++++++--
 io_uring/opdef.c |  1 +
 3 files changed, 53 insertions(+), 22 deletions(-)

Comments

Pavel Begunkov Oct. 24, 2024, 2:44 p.m. UTC | #1
On 10/23/24 17:07, Jens Axboe wrote:
> [...]
> diff --git a/io_uring/net.h b/io_uring/net.h
> index 52bfee05f06a..e052762cf85d 100644
> --- a/io_uring/net.h
> +++ b/io_uring/net.h
> @@ -5,9 +5,15 @@
>   
>   struct io_async_msghdr {
>   #if defined(CONFIG_NET)
> -	struct iovec			fast_iov;
> +	union {
> +		struct iovec		fast_iov;
> +		struct bio_vec		fast_bvec;
> +	};
>   	/* points to an allocated iov, if NULL we use fast_iov instead */
> -	struct iovec			*free_iov;
> +	union {
> +		struct iovec		*free_iov;
> +		struct bio_vec		*free_bvec;

I'd rather not do it like that; aliasing the types in reused memory
while counting elements is a recipe for disaster once it's scattered
across the code. E.g. it seems you change all(?) iovec allocations to
allocate based on the size of the larger structure.

Counting bytes, as in my series, is less fragile; otherwise it needs
a new structure and a set of helpers that can be kept together.
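
A minimal sketch of the hazard being described, with illustrative
names that are not in the patch: once iovec and bio_vec alias the
same array under one shared element count, every allocation site has
to remember to size for the larger type:

	/* Illustrative only: with one count over a union of two element
	 * types, each allocation must use the larger element size, or a
	 * later bio_vec user can overrun an iovec-sized array (the two
	 * sizes differ on 32-bit). */
	#define IO_VEC_ELEM_SIZE					\
		(sizeof(struct iovec) > sizeof(struct bio_vec) ?	\
		 sizeof(struct iovec) : sizeof(struct bio_vec))

	iov = kmalloc_array(nr_segs, IO_VEC_ELEM_SIZE, GFP_KERNEL);
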
Jens Axboe Oct. 24, 2024, 2:48 p.m. UTC | #2
On 10/24/24 8:44 AM, Pavel Begunkov wrote:
> On 10/23/24 17:07, Jens Axboe wrote:
>> [...]
> 
> I'd rather not do it like that; aliasing the types in reused memory
> while counting elements is a recipe for disaster once it's scattered
> across the code. E.g. it seems you change all(?) iovec allocations to
> allocate based on the size of the larger structure.
> 
> Counting bytes, as in my series, is less fragile; otherwise it needs
> a new structure and a set of helpers that can be kept together.

I have been pondering this, because I'm not a huge fan either. But
outside of the space cost, it does come out pretty nicely/clean. This
series is really just a WIP posting as per the RFC, mostly so we can
come up with something that's clean enough and works for both cases,
as it does have the caching that your series does not, and to
facilitate some more efficient TX/RX zero copy testing.

I'd love a separate struct for these two, but that kind of gets in the
way of the usual iovec imports. I'll get back to this soonish; it's
not a 6.13 thing by any stretch.

Pavel Begunkov Oct. 24, 2024, 3:36 p.m. UTC | #3
On 10/24/24 15:48, Jens Axboe wrote:
> On 10/24/24 8:44 AM, Pavel Begunkov wrote:
>> On 10/23/24 17:07, Jens Axboe wrote:
>>> [...]
>>
>> I'd rather not do it like that; aliasing the types in reused memory
>> while counting elements is a recipe for disaster once it's scattered
>> across the code. E.g. it seems you change all(?) iovec allocations to
>> allocate based on the size of the larger structure.
>>
>> Counting bytes, as in my series, is less fragile; otherwise it needs
>> a new structure and a set of helpers that can be kept together.
> 
> I have been pondering this, because I'm not a huge fan either. But
> outside of the space cost, it does come out pretty nicely/clean. This
> series is really just a WIP posting as per the RFC, mostly so we can
> come up with something that's clean enough and works for both cases,
> as it does have the caching that your series does not, and to
> facilitate some more efficient TX/RX zero copy testing.

The thing is, I know how to implement caching on top of my series,
but as I commented in the other thread, I don't think it can reuse
the incremental helper well. At best it could try to unify the imu
checking/parsing and the bvec copy/setup, but that's minor.

Patch

diff --git a/io_uring/net.c b/io_uring/net.c
index 154756762a46..c062b1c685bd 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -83,6 +83,8 @@  struct io_sr_msg {
 
 static int io_sg_from_iter(struct sk_buff *skb, struct iov_iter *from,
 			   size_t length);
+static int io_sg_from_iter_iovec(struct sk_buff *skb, struct iov_iter *from,
+				 size_t length);
 
 /*
  * Number of times we'll try and do receives if there's more data. If we
@@ -581,33 +583,34 @@  int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 	return IOU_OK;
 }
 
-static int io_send_zc_import_single(struct io_kiocb *req,
-				    unsigned int issue_flags)
+static int __io_send_zc_import(struct io_kiocb *req,
+			       struct io_async_msghdr *kmsg, int nsegs)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct io_async_msghdr *kmsg = req->async_data;
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_mapped_ubuf *imu;
 	int ret;
 	u16 idx;
 
-	ret = -EFAULT;
-	io_ring_submit_lock(ctx, issue_flags);
-	if (sr->buf_index < ctx->nr_user_bufs) {
+	if (req->flags & REQ_F_BUFFER_SELECT) {
+		struct bio_vec *bv = kmsg->free_bvec ?: &kmsg->fast_bvec;
+
+		WARN_ON_ONCE(bv == &kmsg->fast_bvec && nsegs > 1);
+		iov_iter_bvec(&kmsg->msg.msg_iter, ITER_SOURCE, bv, nsegs, sr->len);
+	} else {
+		if (WARN_ON_ONCE(nsegs != 1))
+			return -EFAULT;
+		if (unlikely(sr->buf_index >= ctx->nr_user_bufs))
+			return -EFAULT;
 		idx = array_index_nospec(sr->buf_index, ctx->nr_user_bufs);
 		imu = READ_ONCE(ctx->user_bufs[idx]);
-		io_req_set_rsrc_node(sr->notif, ctx);
-		ret = 0;
-	}
-	io_ring_submit_unlock(ctx, issue_flags);
 
-	if (unlikely(ret))
-		return ret;
+		ret = io_import_fixed(ITER_SOURCE, &kmsg->msg.msg_iter, imu,
+					(u64)(uintptr_t)sr->buf, sr->len);
+		if (unlikely(ret))
+			return ret;
+	}
 
-	ret = io_import_fixed(ITER_SOURCE, &kmsg->msg.msg_iter, imu,
-				(u64)(uintptr_t)sr->buf, sr->len);
-	if (unlikely(ret))
-		return ret;
 	kmsg->msg.sg_from_iter = io_sg_from_iter;
 	return 0;
 }
@@ -619,6 +622,16 @@  static int __io_send_import(struct io_kiocb *req, struct buf_sel_arg *arg,
 	struct io_async_msghdr *kmsg = req->async_data;
 	int ret = nsegs;
 
+	if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
+		io_ring_submit_lock(req->ctx, issue_flags);
+		io_req_set_rsrc_node(sr->notif, req->ctx);
+		ret = __io_send_zc_import(req, kmsg, nsegs);
+		io_ring_submit_unlock(req->ctx, issue_flags);
+		if (unlikely(ret < 0))
+			return ret;
+		return nsegs;
+	}
+
 	if (nsegs == 1) {
 		sr->buf = arg->iovs[0].iov_base;
 		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
@@ -646,10 +659,13 @@  static int io_send_import(struct io_kiocb *req, unsigned int issue_flags)
 			.nr_vecs = 1,
 		};
 
+		if (sr->flags & IORING_RECVSEND_FIXED_BUF)
+			arg.mode |= KBUF_MODE_BVEC;
+
 		if (kmsg->free_iov) {
 			arg.nr_vecs = kmsg->free_iov_nr;
 			arg.iovs = kmsg->free_iov;
-			arg.mode = KBUF_MODE_FREE;
+			arg.mode |= KBUF_MODE_FREE;
 		}
 
 		if (!(sr->flags & IORING_RECVSEND_BUNDLE))
@@ -1280,7 +1296,8 @@  void io_send_zc_cleanup(struct io_kiocb *req)
 	}
 }
 
-#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
+#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | \
+			    IORING_RECVSEND_FIXED_BUF | IORING_RECVSEND_BUNDLE)
 #define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
 
 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -1399,8 +1416,13 @@  static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_async_msghdr *kmsg = req->async_data;
 	int ret;
 
+	ret = io_send_import(req, issue_flags);
+	if (unlikely(ret < 0))
+		return ret;
+	if (req->flags & REQ_F_BUFFER_SELECT)
+		return 0;
 	if (sr->flags & IORING_RECVSEND_FIXED_BUF)
-		return io_send_zc_import_single(req, issue_flags);
+		return __io_send_zc_import(req, kmsg, 1);
 
 	ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
 	if (unlikely(ret))
@@ -1416,6 +1438,7 @@  int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
 	struct io_async_msghdr *kmsg = req->async_data;
+	unsigned int cflags;
 	struct socket *sock;
 	unsigned msg_flags;
 	int ret, min_ret = 0;
@@ -1476,7 +1499,8 @@  int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 		io_notif_flush(zc->notif);
 		io_req_msg_cleanup(req, 0);
 	}
-	io_req_set_res(req, ret, IORING_CQE_F_MORE);
+	cflags = io_put_kbuf(req, ret, issue_flags);
+	io_req_set_res(req, ret, cflags | IORING_CQE_F_MORE);
 	return IOU_OK;
 }
 
diff --git a/io_uring/net.h b/io_uring/net.h
index 52bfee05f06a..e052762cf85d 100644
--- a/io_uring/net.h
+++ b/io_uring/net.h
@@ -5,9 +5,15 @@ 
 
 struct io_async_msghdr {
 #if defined(CONFIG_NET)
-	struct iovec			fast_iov;
+	union {
+		struct iovec		fast_iov;
+		struct bio_vec		fast_bvec;
+	};
 	/* points to an allocated iov, if NULL we use fast_iov instead */
-	struct iovec			*free_iov;
+	union {
+		struct iovec		*free_iov;
+		struct bio_vec		*free_bvec;
+	};
 	int				free_iov_nr;
 	int				namelen;
 	__kernel_size_t			controllen;
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index a2be3bbca5ff..6203a7dd5052 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -422,6 +422,7 @@  const struct io_issue_def io_issue_defs[] = {
 		.needs_file		= 1,
 		.unbound_nonreg_file	= 1,
 		.pollout		= 1,
+		.buffer_select		= 1,
 		.audit_skip		= 1,
 		.ioprio			= 1,
 #if defined(CONFIG_NET)