diff mbox series

[2/6] io_uring: move poll_refs up a cacheline to fill a hole

Message ID 20230419160759.568904-3-axboe@kernel.dk (mailing list archive)
State New, archived
Headers show
Series Enable NO_OFFLOAD support | expand

Commit Message

Jens Axboe April 19, 2023, 4:07 p.m. UTC
After bumping the flags to 64 bits, we now have two holes in io_kiocb.
The best candidate for moving is poll_refs, so as not to split the
task_work related items.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 include/linux/io_uring_types.h | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
diff mbox series

Patch

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 84f436cc6509..4dd54d2173e1 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -535,6 +535,9 @@  struct io_kiocb {
 	 * and after selection it points to the buffer ID itself.
 	 */
 	u16				buf_index;
+
+	atomic_t			poll_refs;
+
 	u64				flags;
 
 	struct io_cqe			cqe;
@@ -565,9 +568,8 @@  struct io_kiocb {
 		__poll_t apoll_events;
 	};
 	atomic_t			refs;
-	atomic_t			poll_refs;
-	struct io_task_work		io_task_work;
 	unsigned			nr_tw;
+	struct io_task_work		io_task_work;
 	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
 	union {
 		struct hlist_node	hash_node;