diff --git a/fs/io_uring.c b/fs/io_uring.c
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -474,6 +474,10 @@ struct io_uring_task {
atomic_t inflight_tracked;
atomic_t in_idle;
+#ifdef CONFIG_BLOCK
+ struct bio_alloc_cache bio_cache;
+#endif
+
spinlock_t task_lock;
struct io_wq_work_list task_list;
unsigned long task_state;
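
[Editorial note: the bio_alloc_cache type stored in io_uring_task above comes from the block-layer patches earlier in this series and is not defined in this patch. The sketch below is only an illustration of what such a per-task cache and its init/destroy helpers could look like; the field names and the recycling strategy are assumptions, not the actual block-layer code.]

/*
 * Illustrative sketch only (not part of this patch): a plausible shape
 * for the bio_alloc_cache abstraction consumed here.  Needs the
 * bio_list helpers from <linux/bio.h>.
 */
struct bio_alloc_cache {
	struct bio_list		free_list;	/* recycled bios, LIFO */
	unsigned int		nr;		/* number of cached entries */
};

static inline void bio_alloc_cache_init(struct bio_alloc_cache *cache)
{
	bio_list_init(&cache->free_list);
	cache->nr = 0;
}

static inline void bio_alloc_cache_destroy(struct bio_alloc_cache *cache)
{
	struct bio *bio;

	/* Drop anything still cached back to the normal bio freeing path */
	while ((bio = bio_list_pop(&cache->free_list)) != NULL)
		bio_put(bio);
	cache->nr = 0;
}
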
@@ -2268,6 +2272,8 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
if (READ_ONCE(req->result) == -EAGAIN && resubmit &&
!(req->flags & REQ_F_DONT_REISSUE)) {
req->iopoll_completed = 0;
+ /* Don't use cache for async retry, not locking safe */
+ req->rw.kiocb.ki_flags &= ~IOCB_ALLOC_CACHE;
req_ref_get(req);
io_req_task_queue_reissue(req);
continue;
@@ -2675,6 +2681,29 @@ static bool io_file_supports_nowait(struct io_kiocb *req, int rw)
return __io_file_supports_nowait(req->file, rw);
}
+static void io_mark_alloc_cache(struct kiocb *kiocb)
+{
+#ifdef CONFIG_BLOCK
+ struct block_device *bdev = NULL;
+
+ if (S_ISBLK(file_inode(kiocb->ki_filp)->i_mode))
+ bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
+ else if (S_ISREG(file_inode(kiocb->ki_filp)->i_mode))
+ bdev = kiocb->ki_filp->f_inode->i_sb->s_bdev;
+
+ /*
+ * If the lower level device doesn't support polled IO, then
+ * we cannot safely use the alloc cache. This really should
+ * be a failure case for polled IO...
+ */
+ if (!bdev ||
+ !test_bit(QUEUE_FLAG_POLL, &bdev_get_queue(bdev)->queue_flags))
+ return;
+
+ kiocb->ki_flags |= IOCB_ALLOC_CACHE;
+#endif /* CONFIG_BLOCK */
+}
+
static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_ring_ctx *ctx = req->ctx;
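
[Editorial note: to show the other half of the handshake, the sketch below illustrates how a polled bio submission path might consume the per-task cache once io_mark_alloc_cache() has set IOCB_ALLOC_CACHE. It is illustrative only; bio_alloc_cache_get() is an assumed helper from the block-layer side of this series and is not defined in this patch.]

/*
 * Illustrative sketch only: consume the per-task cache advertised via
 * IOCB_ALLOC_CACHE, falling back to the regular allocator when the
 * cache is empty or unavailable.  bio_alloc_cache_get() is assumed.
 */
static struct bio *example_bio_alloc_for_kiocb(struct kiocb *kiocb,
					       unsigned short nr_vecs)
{
	if (kiocb->ki_flags & IOCB_ALLOC_CACHE) {
		struct bio_alloc_cache *cache = io_uring_bio_cache();
		struct bio *bio;

		if (cache && (bio = bio_alloc_cache_get(cache)) != NULL) {
			bio_init(bio, bio->bi_inline_vecs, nr_vecs);
			return bio;
		}
	}

	/* Cache miss, or caching not allowed: use the normal mempool path */
	return bio_alloc(GFP_KERNEL, nr_vecs);
}
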
@@ -2717,6 +2746,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EOPNOTSUPP;
kiocb->ki_flags |= IOCB_HIPRI;
+ io_mark_alloc_cache(kiocb);
kiocb->ki_complete = io_complete_rw_iopoll;
req->iopoll_completed = 0;
} else {
@@ -2783,6 +2813,8 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
if (check_reissue && (req->flags & REQ_F_REISSUE)) {
req->flags &= ~REQ_F_REISSUE;
if (io_resubmit_prep(req)) {
+ /* Don't use cache for async retry, not locking safe */
+ req->rw.kiocb.ki_flags &= ~IOCB_ALLOC_CACHE;
req_ref_get(req);
io_req_task_queue_reissue(req);
} else {
@@ -7966,10 +7998,17 @@ static int io_uring_alloc_task_context(struct task_struct *task,
return ret;
}
+#ifdef CONFIG_BLOCK
+ bio_alloc_cache_init(&tctx->bio_cache);
+#endif
+
tctx->io_wq = io_init_wq_offload(ctx, task);
if (IS_ERR(tctx->io_wq)) {
ret = PTR_ERR(tctx->io_wq);
percpu_counter_destroy(&tctx->inflight);
+#ifdef CONFIG_BLOCK
+ bio_alloc_cache_destroy(&tctx->bio_cache);
+#endif
kfree(tctx);
return ret;
}
@@ -7993,6 +8032,10 @@ void __io_uring_free(struct task_struct *tsk)
WARN_ON_ONCE(tctx->io_wq);
WARN_ON_ONCE(tctx->cached_refs);
+#ifdef CONFIG_BLOCK
+ bio_alloc_cache_destroy(&tctx->bio_cache);
+#endif
+
percpu_counter_destroy(&tctx->inflight);
kfree(tctx);
tsk->io_uring = NULL;
@@ -10247,6 +10290,15 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
return ret;
}
+struct bio_alloc_cache *io_uring_bio_cache(void)
+{
+#ifdef CONFIG_BLOCK
+ if (current->io_uring)
+ return &current->io_uring->bio_cache;
+#endif
+ return NULL;
+}
+
static int __init io_uring_init(void)
{
#define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -11,6 +11,7 @@ struct bio_alloc_cache;
struct sock *io_uring_get_socket(struct file *file);
void __io_uring_cancel(struct files_struct *files);
void __io_uring_free(struct task_struct *tsk);
+struct bio_alloc_cache *io_uring_bio_cache(void);
static inline void io_uring_files_cancel(struct files_struct *files)
{
@@ -40,11 +41,10 @@ static inline void io_uring_files_cancel(struct files_struct *files)
static inline void io_uring_free(struct task_struct *tsk)
{
}
-#endif
-
static inline struct bio_alloc_cache *io_uring_bio_cache(void)
{
return NULL;
}
+#endif
#endif
Initialize a bio allocation cache, and mark it as being used for IOPOLL.
We could use it for non-polled IO as well, but it'd need some locking and
probably would negate much of the win in that case.

We start with IOPOLL, as completions are locked by the ctx lock anyway.
So no further locking is needed there.

This brings an IOPOLL gen2 Optane QD=128 workload from ~3.0M IOPS to
~3.25M IOPS.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 fs/io_uring.c            | 52 ++++++++++++++++++++++++++++++++++++++++
 include/linux/io_uring.h |  4 ++--
 2 files changed, 54 insertions(+), 2 deletions(-)
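
[Editorial note: since the cache only comes into play for polled IO, a workload like the one quoted above sets up the ring with IORING_SETUP_IOPOLL and issues O_DIRECT reads against a device whose queue has polling enabled. A minimal liburing-based sketch follows; the device path, queue depth and block size are placeholders.]

/* Minimal illustration of a workload that exercises the IOPOLL path. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	void *buf;
	int fd;

	/* IOPOLL requires O_DIRECT and a poll-capable queue */
	fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);
	if (fd < 0)
		return 1;
	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	if (io_uring_queue_init(128, &ring, IORING_SETUP_IOPOLL))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	if (!sqe)
		return 1;
	io_uring_prep_read(sqe, fd, buf, 4096, 0);
	io_uring_submit(&ring);

	/* Completions are reaped by actively polling the device */
	if (io_uring_wait_cqe(&ring, &cqe) == 0)
		io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return 0;
}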