
[for-next,3/6] io_uring: introduce io_req_cqe_overflow()

Message ID: 048b9fbcce56814d77a1a540409c98c3d383edcb.1655455613.git.asml.silence@gmail.com
State: New
Series: clean up __io_fill_cqe_req()

Commit Message

Pavel Begunkov June 17, 2022, 8:48 a.m. UTC
__io_fill_cqe_req() is hot and inlined, so we want it to be as small as
possible. Add io_req_cqe_overflow(), which accepts only a request and does
all overflow accounting, and replace the two calls to the six-argument
io_cqring_event_overflow() with it.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/io_uring.c | 15 +++++++++++++--
 io_uring/io_uring.h | 12 ++----------
 2 files changed, 15 insertions(+), 12 deletions(-)
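
The refactoring is a standard fast-path/slow-path split: keep the inlined
hot path tiny and move the cold overflow bookkeeping out of line, behind a
call that passes a single pointer. A minimal standalone sketch of the same
shape (all names here are illustrative, not the kernel's):

	#include <stdbool.h>
	#include <stdio.h>

	struct entry { unsigned long user_data; int res; };

	struct ring {
		struct entry cqes[4];
		unsigned int head, tail;
		unsigned long overflowed;
	};

	/* Cold, out of line: one pointer argument keeps call sites small. */
	static bool ring_overflow(struct ring *r, const struct entry *e)
	{
		(void)e;		/* the real code queues an io_overflow_cqe */
		r->overflowed++;
		return true;
	}

	/* Hot, inlined: only the common case is expanded at the call site. */
	static inline bool ring_post(struct ring *r, const struct entry *e)
	{
		if (r->tail - r->head < 4) {
			r->cqes[r->tail++ & 3] = *e;
			return true;
		}
		return ring_overflow(r, e);
	}

	int main(void)
	{
		struct ring r = {0};
		struct entry e = { .user_data = 1, .res = 0 };

		for (int i = 0; i < 6; i++)
			ring_post(&r, &e);
		printf("posted %u, overflowed %lu\n", r.tail, r.overflowed);
		return 0;
	}

Before the patch, each overflow call site expanded six argument moves into
the inlined body; afterwards it is a single pointer-passing call, which is
the point of the change.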

Patch

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index a3b1339335c5..263d7e4f1b41 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -640,8 +640,8 @@  static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
 	}
 }
 
-bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, s32 res,
-			      u32 cflags, u64 extra1, u64 extra2)
+static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
+				     s32 res, u32 cflags, u64 extra1, u64 extra2)
 {
 	struct io_overflow_cqe *ocqe;
 	size_t ocq_size = sizeof(struct io_overflow_cqe);
@@ -678,6 +678,17 @@  bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 	return true;
 }
 
+bool io_req_cqe_overflow(struct io_kiocb *req)
+{
+	if (!(req->flags & REQ_F_CQE32_INIT)) {
+		req->extra1 = 0;
+		req->extra2 = 0;
+	}
+	return io_cqring_event_overflow(req->ctx, req->cqe.user_data,
+					req->cqe.res, req->cqe.flags,
+					req->extra1, req->extra2);
+}
+
 /*
  * writes to the cq entry need to come after reading head; the
  * control dependency is enough as we're using WRITE_ONCE to
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 51032a494aec..668fff18d3cc 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -17,8 +17,7 @@  enum {
 };
 
 struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
-bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, s32 res,
-			      u32 cflags, u64 extra1, u64 extra2);
+bool io_req_cqe_overflow(struct io_kiocb *req);
 
 static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
 {
@@ -58,10 +57,6 @@  static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
 			memcpy(cqe, &req->cqe, sizeof(*cqe));
 			return true;
 		}
-
-		return io_cqring_event_overflow(ctx, req->cqe.user_data,
-						req->cqe.res, req->cqe.flags,
-						0, 0);
 	} else {
 		u64 extra1 = 0, extra2 = 0;
 
@@ -85,11 +80,8 @@  static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
 			WRITE_ONCE(cqe->big_cqe[1], extra2);
 			return true;
 		}
-
-		return io_cqring_event_overflow(ctx, req->cqe.user_data,
-				req->cqe.res, req->cqe.flags,
-				extra1, extra2);
 	}
+	return io_req_cqe_overflow(req);
 }
 
 static inline void req_set_fail(struct io_kiocb *req)
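
One subtlety in the new helper is the REQ_F_CQE32_INIT guard: a request's
extra1/extra2 fields are left uninitialized unless the request explicitly
fills them, so the slow path must default them to zero rather than forward
stale values into the overflow entry. The same flag-guarded defaulting in
miniature, with illustrative names:

	#include <stdint.h>
	#include <stdio.h>

	#define F_BIG_INIT	(1u << 0)	/* set once the extra fields are filled */

	struct req {
		unsigned int flags;
		uint64_t extra1, extra2;	/* garbage unless F_BIG_INIT is set */
	};

	/* Mirrors io_req_cqe_overflow()'s guard: zero uninitialized extras. */
	static void report(struct req *r)
	{
		if (!(r->flags & F_BIG_INIT)) {
			r->extra1 = 0;
			r->extra2 = 0;
		}
		printf("extra1=%llu extra2=%llu\n",
		       (unsigned long long)r->extra1,
		       (unsigned long long)r->extra2);
	}

	int main(void)
	{
		/* extras hold leftover bits, but the flag was never set */
		struct req stale = { .flags = 0, .extra1 = 0xdead, .extra2 = 0xbeef };

		report(&stale);		/* prints zeroes */
		return 0;
	}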