diff mbox series

[2/3] io_uring/net: add IORING_SEND_IGNORE_INLINE support to send/sendmsg

Message ID 20241014205416.456078-3-axboe@kernel.dk (mailing list archive)
State New
Headers show
Series Add ability to ignore inline completions | expand

Commit Message

Jens Axboe Oct. 14, 2024, 8:49 p.m. UTC
If IORING_SEND_IGNORE_INLINE is set for a send/sendmsg request, then a
successful inline completion of such a request will be ignored for a
submit_and_wait() type of submission. In other words, if an application
submits a send for socketA with a recv for socketB, it can now do:

io_uring_submit_and_wait(ring, 1);

and have the inline send completion be excluded from the count of items
to wait for.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 include/uapi/linux/io_uring.h | 7 +++++++
 io_uring/net.c                | 9 ++++++++-
 2 files changed, 15 insertions(+), 1 deletion(-)
diff mbox series

Patch

diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 1967f5ab2317..e026ade027c1 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -362,12 +362,19 @@  enum io_uring_op {
  *				the starting buffer ID in cqe->flags as per
  *				usual for provided buffer usage. The buffers
  *				will be	contigious from the starting buffer ID.
+ *
+ * IORING_SEND_IGNORE_INLINE	If set for a send[msg] request, then the
+ *				completion will NOT be included in the count
+ *				waited for by an application, if completed
+ *				inline as part of submission. It will still
+ *				generate a CQE.
  */
 #define IORING_RECVSEND_POLL_FIRST	(1U << 0)
 #define IORING_RECV_MULTISHOT		(1U << 1)
 #define IORING_RECVSEND_FIXED_BUF	(1U << 2)
 #define IORING_SEND_ZC_REPORT_USAGE	(1U << 3)
 #define IORING_RECVSEND_BUNDLE		(1U << 4)
+#define IORING_SEND_IGNORE_INLINE	(1U << 5)
 
 /*
  * cqe.res for IORING_CQE_F_NOTIF if
diff --git a/io_uring/net.c b/io_uring/net.c
index 18507658a921..11ff58a5c145 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -400,7 +400,8 @@  static int io_sendmsg_prep_setup(struct io_kiocb *req, int is_msg)
 	return ret;
 }
 
-#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE)
+#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE | \
+			IORING_SEND_IGNORE_INLINE)
 
 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
@@ -434,6 +435,8 @@  int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		sr->buf_group = req->buf_index;
 		req->buf_list = NULL;
 	}
+	if (sr->flags & IORING_SEND_IGNORE_INLINE)
+		req->flags |= REQ_F_IGNORE_INLINE;
 
 #ifdef CONFIG_COMPAT
 	if (req->ctx->compat)
@@ -550,6 +553,8 @@  int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
 
 	if (ret < min_ret) {
+		req->flags &= ~REQ_F_IGNORE_INLINE;
+
 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
 			return -EAGAIN;
 		if (ret > 0 && io_net_retry(sock, flags)) {
@@ -647,6 +652,8 @@  int io_send(struct io_kiocb *req, unsigned int issue_flags)
 	kmsg->msg.msg_flags = flags;
 	ret = sock_sendmsg(sock, &kmsg->msg);
 	if (ret < min_ret) {
+		req->flags &= ~REQ_F_IGNORE_INLINE;
+
 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
 			return -EAGAIN;