
[3/4] io_uring: wire up bio allocation cache

Message ID 20210809212401.19807-4-axboe@kernel.dk (mailing list archive)
State New, archived
Series Enable bio recycling for polled IO

Commit Message

Jens Axboe Aug. 9, 2021, 9:24 p.m. UTC
Initialize a bio allocation cache, and mark it as being used for
IOPOLL. We could use it for non-polled IO as well, but it'd need some
locking and probably would negate much of the win in that case.

We start with IOPOLL, as completions are already serialized by the ctx
lock, so no further locking is needed there.

This brings an IOPOLL gen2 Optane QD=128 workload from ~3.0M IOPS to
~3.25M IOPS.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 fs/io_uring.c            | 52 ++++++++++++++++++++++++++++++++++++++++
 include/linux/io_uring.h |  4 ++--
 2 files changed, 54 insertions(+), 2 deletions(-)
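
The struct bio_alloc_cache this patch hooks up comes from earlier in the
series. As a minimal sketch of the idea the commit message relies on, a
cache like this is just a free list that can be popped and refilled with
no locking at all, provided allocation and completion are serialized by
the same lock, which for IOPOLL is the ctx lock. The type, field and
helper names below are illustrative assumptions, not the series' actual
code.

#include <linux/bio.h>

/*
 * Illustrative only: a single-context bio cache in the spirit of the
 * bio_alloc_cache wired up by this patch. No lock is needed because the
 * caller guarantees get and put never run concurrently (for IOPOLL, both
 * happen under the ctx lock). The free list is set up with
 * bio_list_init() at init time.
 */
struct bio_cache_sketch {
	struct bio_list	free_list;	/* recycled bios, LIFO for cache warmth */
	unsigned int	nr;		/* number of bios currently cached */
};

static struct bio *bio_cache_sketch_get(struct bio_cache_sketch *cache)
{
	struct bio *bio = bio_list_pop(&cache->free_list);

	if (bio)
		cache->nr--;
	return bio;
}

static void bio_cache_sketch_put(struct bio_cache_sketch *cache, struct bio *bio)
{
	bio_list_add_head(&cache->free_list, bio);
	cache->nr++;
}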

Comments

Kanchan Joshi Aug. 10, 2021, 12:25 p.m. UTC | #1
On Tue, Aug 10, 2021 at 6:40 AM Jens Axboe <axboe@kernel.dk> wrote:
>
> Initialize a bio allocation cache, and mark it as being used for
> IOPOLL. We could use it for non-polled IO as well, but it'd need some
> locking and probably would negate much of the win in that case.

For regular (non-polled) IO, would it make sense to tie a bio cache to
each fixed-buffer slot (the ctx->user_bufs array)? One bio cache (along
with its own lock) per slot. That may localize the lock contention, and
the lock would only be contended when multiple IOs are issued from the
same fixed buffer concurrently.

> We start with IOPOLL, as completions are locked by the ctx lock anyway.
> So no further locking is needed there.
>
> This brings an IOPOLL gen2 Optane QD=128 workload from ~3.0M IOPS to
> ~3.25M IOPS.
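
As a rough picture of the per-slot suggestion above (nothing like this
exists in the patch; the struct and helper names are hypothetical), each
registered buffer would carry its own cache plus lock, so two requests
only contend when they target the same slot:

#include <linux/bio.h>
#include <linux/spinlock.h>

/*
 * Hypothetical per-fixed-buffer-slot cache illustrating the suggestion
 * above; io_mapped_ubuf has no such fields in the patch. Reuses the
 * bio_cache_sketch helpers from the sketch after the commit message.
 */
struct io_ubuf_bio_cache {
	spinlock_t		lock;	/* per-slot, so contention stays local */
	struct bio_cache_sketch	cache;
};

static struct bio *io_ubuf_cache_get(struct io_ubuf_bio_cache *slot)
{
	struct bio *bio;

	/*
	 * For non-polled IO the put side would run from the completion
	 * interrupt, so in practice this would need spin_lock_irqsave();
	 * that extra cost is exactly what the reply below gets at.
	 */
	spin_lock(&slot->lock);
	bio = bio_cache_sketch_get(&slot->cache);
	spin_unlock(&slot->lock);
	return bio;
}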
Jens Axboe Aug. 10, 2021, 1:50 p.m. UTC | #2
On 8/10/21 6:25 AM, Kanchan Joshi wrote:
> On Tue, Aug 10, 2021 at 6:40 AM Jens Axboe <axboe@kernel.dk> wrote:
>>
>> Initialize a bio allocation cache, and mark it as being used for
>> IOPOLL. We could use it for non-polled IO as well, but it'd need some
>> locking and probably would negate much of the win in that case.
> 
> For regular (non-polled) IO, would it make sense to tie a bio cache to
> each fixed-buffer slot (the ctx->user_bufs array)? One bio cache (along
> with its own lock) per slot. That may localize the lock contention, and
> the lock would only be contended when multiple IOs are issued from the
> same fixed buffer concurrently.

I don't think it's worth it - the slub overhead is already pretty low,
basically turning into a cmpxchg16 for the fast path. But that's a big
enough hit for polled IO of this magnitude that it's worth getting rid
of.

I've attempted bio caches for non-polled IO before, but the lock + irq
dance they require means the gain ends up being moot. Even if you have
per-cpu caches, just doing the irq disable/enable means you're back at
the same perf you started with, except now you've got extra code...

Here's an example from a few years ago:

https://git.kernel.dk/cgit/linux-block/log/?h=cpu-alloc-cache
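
To make the irq point concrete, here is what even a per-cpu cache ends up
doing when bios are returned to it from the completion interrupt, versus
the IOPOLL case where everything happens in process context under the ctx
lock. Names are hypothetical and reuse the earlier sketch; only the
pattern matters.

#include <linux/bio.h>
#include <linux/percpu.h>

/* Hypothetical per-cpu cache built on the sketch struct from above */
static DEFINE_PER_CPU(struct bio_cache_sketch, sketch_bio_cache);

static struct bio *percpu_cache_get(void)
{
	struct bio_cache_sketch *cache;
	unsigned long flags;
	struct bio *bio;

	/*
	 * The completion interrupt refills this cache, so every access
	 * from process context has to disable local interrupts. That
	 * enable/disable pair is the cost that cancels out the win.
	 */
	local_irq_save(flags);
	cache = this_cpu_ptr(&sketch_bio_cache);
	bio = bio_cache_sketch_get(cache);
	local_irq_restore(flags);
	return bio;
}

/*
 * With IOPOLL there is no interrupt-side put: io_iopoll_complete() runs
 * in process context under the ctx lock, so the cache can be used with
 * no lock and no irq toggling at all, which is the whole win.
 */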

Patch

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 91a301bb1644..1d94a434b348 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -474,6 +474,10 @@  struct io_uring_task {
 	atomic_t		inflight_tracked;
 	atomic_t		in_idle;
 
+#ifdef CONFIG_BLOCK
+	struct bio_alloc_cache	bio_cache;
+#endif
+
 	spinlock_t		task_lock;
 	struct io_wq_work_list	task_list;
 	unsigned long		task_state;
@@ -2268,6 +2272,8 @@  static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		if (READ_ONCE(req->result) == -EAGAIN && resubmit &&
 		    !(req->flags & REQ_F_DONT_REISSUE)) {
 			req->iopoll_completed = 0;
+			/* Don't use cache for async retry, not locking safe */
+			req->rw.kiocb.ki_flags &= ~IOCB_ALLOC_CACHE;
 			req_ref_get(req);
 			io_req_task_queue_reissue(req);
 			continue;
@@ -2675,6 +2681,29 @@  static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
 	return __io_file_supports_nowait(req->file, rw);
 }
 
+static void io_mark_alloc_cache(struct kiocb *kiocb)
+{
+#ifdef CONFIG_BLOCK
+	struct block_device *bdev = NULL;
+
+	if (S_ISBLK(file_inode(kiocb->ki_filp)->i_mode))
+		bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
+	else if (S_ISREG(file_inode(kiocb->ki_filp)->i_mode))
+		bdev = kiocb->ki_filp->f_inode->i_sb->s_bdev;
+
+	/*
+	 * If the lower level device doesn't support polled IO, then
+	 * we cannot safely use the alloc cache. This really should
+	 * be a failure case for polled IO...
+	 */
+	if (!bdev ||
+	    !test_bit(QUEUE_FLAG_POLL, &bdev_get_queue(bdev)->queue_flags))
+		return;
+
+	kiocb->ki_flags |= IOCB_ALLOC_CACHE;
+#endif /* CONFIG_BLOCK */
+}
+
 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -2717,6 +2746,7 @@  static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 			return -EOPNOTSUPP;
 
 		kiocb->ki_flags |= IOCB_HIPRI;
+		io_mark_alloc_cache(kiocb);
 		kiocb->ki_complete = io_complete_rw_iopoll;
 		req->iopoll_completed = 0;
 	} else {
@@ -2783,6 +2813,8 @@  static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 	if (check_reissue && (req->flags & REQ_F_REISSUE)) {
 		req->flags &= ~REQ_F_REISSUE;
 		if (io_resubmit_prep(req)) {
+			/* Don't use cache for async retry, not locking safe */
+			req->rw.kiocb.ki_flags &= ~IOCB_ALLOC_CACHE;
 			req_ref_get(req);
 			io_req_task_queue_reissue(req);
 		} else {
@@ -7966,10 +7998,17 @@  static int io_uring_alloc_task_context(struct task_struct *task,
 		return ret;
 	}
 
+#ifdef CONFIG_BLOCK
+	bio_alloc_cache_init(&tctx->bio_cache);
+#endif
+
 	tctx->io_wq = io_init_wq_offload(ctx, task);
 	if (IS_ERR(tctx->io_wq)) {
 		ret = PTR_ERR(tctx->io_wq);
 		percpu_counter_destroy(&tctx->inflight);
+#ifdef CONFIG_BLOCK
+		bio_alloc_cache_destroy(&tctx->bio_cache);
+#endif
 		kfree(tctx);
 		return ret;
 	}
@@ -7993,6 +8032,10 @@  void __io_uring_free(struct task_struct *tsk)
 	WARN_ON_ONCE(tctx->io_wq);
 	WARN_ON_ONCE(tctx->cached_refs);
 
+#ifdef CONFIG_BLOCK
+	bio_alloc_cache_destroy(&tctx->bio_cache);
+#endif
+
 	percpu_counter_destroy(&tctx->inflight);
 	kfree(tctx);
 	tsk->io_uring = NULL;
@@ -10247,6 +10290,15 @@  SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
 	return ret;
 }
 
+struct bio_alloc_cache *io_uring_bio_cache(void)
+{
+#ifdef CONFIG_BLOCK
+	if (current->io_uring)
+		return &current->io_uring->bio_cache;
+#endif
+	return NULL;
+}
+
 static int __init io_uring_init(void)
 {
 #define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 2fb53047638e..a9bab9bd51d1 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -11,6 +11,7 @@  struct bio_alloc_cache;
 struct sock *io_uring_get_socket(struct file *file);
 void __io_uring_cancel(struct files_struct *files);
 void __io_uring_free(struct task_struct *tsk);
+struct bio_alloc_cache *io_uring_bio_cache(void);
 
 static inline void io_uring_files_cancel(struct files_struct *files)
 {
@@ -40,11 +41,10 @@  static inline void io_uring_files_cancel(struct files_struct *files)
 static inline void io_uring_free(struct task_struct *tsk)
 {
 }
-#endif
-
 static inline struct bio_alloc_cache *io_uring_bio_cache(void)
 {
 	return NULL;
 }
+#endif
 
 #endif
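
The allocation-side consumer is not part of this patch; it is presumably
wired up elsewhere in the series. As a rough sketch of how the two
exports fit together, a polled IO path could check IOCB_ALLOC_CACHE, try
the per-task cache via io_uring_bio_cache(), and fall back to the regular
bio allocator when the cache is missing or empty. The accessor below is a
hypothetical stand-in, not the series' actual helper.

#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/io_uring.h>

/* Hypothetical accessor: pop a cached bio, or return NULL if the cache is empty */
struct bio *bio_alloc_cache_get(struct bio_alloc_cache *cache);

static struct bio *bio_alloc_polled(struct kiocb *kiocb, unsigned short nr_vecs,
				    struct bio_set *bs)
{
	struct bio *bio = NULL;

	if (kiocb->ki_flags & IOCB_ALLOC_CACHE) {
		/* NULL for tasks without an io_uring context */
		struct bio_alloc_cache *cache = io_uring_bio_cache();

		if (cache)
			bio = bio_alloc_cache_get(cache);
	}
	if (!bio)
		bio = bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs);
	return bio;
}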