@@ -3951,7 +3951,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
}
static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
- bool spin)
+ unsigned int flags)
{
struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
long state = current->state;
@@ -3974,7 +3974,7 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
if (current->state == TASK_RUNNING)
return 1;
- if (ret < 0 || !spin)
+ if (ret < 0 || (flags & BLK_POLL_ONESHOT))
break;
cpu_relax();
} while (!need_resched());
@@ -3987,15 +3987,13 @@ static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
* blk_poll - poll for IO completions
* @q: the queue
* @cookie: cookie passed back at IO submission time
- * @spin: whether to spin for completions
+ * @flags: BLK_POLL_* flags that control the behavior
*
* Description:
* Poll for completions on the passed in queue. Returns number of
- * completed entries found. If @spin is true, then blk_poll will continue
- * looping until at least one completion is found, unless the task is
- * otherwise marked running (or we need to reschedule).
+ * completed entries found.
*/
-int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
+int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags)
{
if (cookie == BLK_QC_T_NONE ||
!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
@@ -4004,12 +4002,11 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
if (current->plug)
blk_flush_plug_list(current->plug, false);
- /* If specified not to spin, we also should not sleep. */
- if (spin && q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
+ if (q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
if (blk_mq_poll_hybrid(q, cookie))
return 1;
}
- return blk_mq_poll_classic(q, cookie, spin);
+ return blk_mq_poll_classic(q, cookie, flags);
}
EXPORT_SYMBOL_GPL(blk_poll);
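For reference (illustrative mapping, not part of the patch): callers that used to pass spin == true now pass 0 and keep the "poll until a completion shows up" behaviour, while spin == false becomes BLK_POLL_ONESHOT:

	blk_poll(q, cookie, true);		/* old: keep polling           */
	blk_poll(q, cookie, 0);			/* new: same behaviour         */

	blk_poll(q, cookie, false);		/* old: poll the hardware once */
	blk_poll(q, cookie, BLK_POLL_ONESHOT);	/* new: poll the hardware once */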
@@ -1032,7 +1032,7 @@ static void nvme_execute_rq_polled(struct request_queue *q,
blk_execute_rq_nowait(bd_disk, rq, at_head, nvme_end_sync_rq);
while (!completion_done(&wait)) {
- blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
+ blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), 0);
cond_resched();
}
}
@@ -286,7 +286,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
if (!READ_ONCE(bio.bi_private))
break;
if (!(iocb->ki_flags & IOCB_HIPRI) ||
- !blk_poll(bdev_get_queue(bdev), qc, true))
+ !blk_poll(bdev_get_queue(bdev), qc, 0))
blk_io_schedule();
}
__set_current_state(TASK_RUNNING);
@@ -319,12 +319,12 @@ struct blkdev_dio {
static struct bio_set blkdev_dio_pool;
-static int blkdev_iopoll(struct kiocb *kiocb, bool wait)
+static int blkdev_iopoll(struct kiocb *kiocb, unsigned int flags)
{
struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
struct request_queue *q = bdev_get_queue(bdev);
- return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);
+ return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags);
}
static void blkdev_bio_end_io(struct bio *bio)
@@ -475,7 +475,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
if (!READ_ONCE(dio->waiter))
break;
- if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, true))
+ if (!do_poll || !blk_poll(bdev_get_queue(bdev), qc, 0))
blk_io_schedule();
}
__set_current_state(TASK_RUNNING);
@@ -2290,18 +2290,18 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
long min)
{
+ unsigned int poll_flags = 0;
struct io_kiocb *req, *tmp;
LIST_HEAD(done);
- bool spin;
- int ret;
+ int ret = 0;
/*
* Only spin for completions if we don't have multiple devices hanging
* off our complete list, and we're under the requested amount.
*/
- spin = !ctx->poll_multi_file && *nr_events < min;
+ if (ctx->poll_multi_file || *nr_events >= min)
+ poll_flags |= BLK_POLL_ONESHOT;
- ret = 0;
list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
struct kiocb *kiocb = &req->rw.kiocb;
@@ -2317,7 +2317,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
if (!list_empty(&done))
break;
- ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
+ ret = kiocb->ki_filp->f_op->iopoll(kiocb, poll_flags);
if (ret < 0)
break;
@@ -2325,8 +2325,8 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
if (READ_ONCE(req->iopoll_completed))
list_move_tail(&req->inflight_entry, &done);
- if (ret && spin)
- spin = false;
+ if (ret)
+ poll_flags |= BLK_POLL_ONESHOT;
ret = 0;
}
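The io_uring hunk above is the one place that flips the flag at runtime. A minimal sketch of that flag-escalation pattern, under the semantics introduced by this patch (struct my_req and poll_list() are hypothetical stand-ins for io_kiocb and io_do_iopoll(); ->iopoll() is called with the new flags argument):

	struct my_req {
		struct list_head entry;
		struct kiocb kiocb;
	};

	static int poll_list(struct list_head *list, bool may_spin)
	{
		unsigned int poll_flags = may_spin ? 0 : BLK_POLL_ONESHOT;
		struct my_req *req;
		int found = 0, ret;

		list_for_each_entry(req, list, entry) {
			ret = req->kiocb.ki_filp->f_op->iopoll(&req->kiocb, poll_flags);
			if (ret < 0)
				return ret;
			found += ret;
			/* after the first completion, later iterations only do one pass */
			if (ret)
				poll_flags |= BLK_POLL_ONESHOT;
		}
		return found;
	}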
@@ -49,13 +49,13 @@ struct iomap_dio {
};
};
-int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
+int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags)
{
struct request_queue *q = READ_ONCE(kiocb->private);
if (!q)
return 0;
- return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
+ return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags);
}
EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
@@ -640,7 +640,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (!(iocb->ki_flags & IOCB_HIPRI) ||
!dio->submit.last_queue ||
!blk_poll(dio->submit.last_queue,
- dio->submit.cookie, true))
+ dio->submit.cookie, 0))
blk_io_schedule();
}
__set_current_state(TASK_RUNNING);
@@ -946,7 +946,9 @@ extern const char *blk_op_str(unsigned int op);
int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
-int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
+/* only poll the hardware once instead of polling until a completion is found */
+#define BLK_POLL_ONESHOT (1 << 0)
+int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags);
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
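A sketch (not from the patch) of how a poll implementation honours the new flag, mirroring the blk_mq_poll_classic() hunk above; poll_hw_once() is a hypothetical helper that does a single pass over the completion queue:

	static int my_poll(unsigned int flags)
	{
		do {
			int found = poll_hw_once();

			if (found > 0)
				return found;
			if (found < 0 || (flags & BLK_POLL_ONESHOT))
				break;		/* caller asked for a single pass */
			cpu_relax();
		} while (!need_resched());

		return 0;
	}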
@@ -2026,7 +2026,7 @@ struct file_operations {
ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
- int (*iopoll)(struct kiocb *kiocb, bool spin);
+ int (*iopoll)(struct kiocb *kiocb, unsigned int flags);
int (*iterate) (struct file *, struct dir_context *);
int (*iterate_shared) (struct file *, struct dir_context *);
__poll_t (*poll) (struct file *, struct poll_table_struct *);
@@ -275,7 +275,7 @@ struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
unsigned int dio_flags);
ssize_t iomap_dio_complete(struct iomap_dio *dio);
-int iomap_dio_iopoll(struct kiocb *kiocb, bool spin);
+int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags);
#ifdef CONFIG_SWAP
struct file;
@@ -428,7 +428,7 @@ int swap_readpage(struct page *page, bool synchronous)
if (!READ_ONCE(bio->bi_private))
break;
- if (!blk_poll(disk->queue, qc, true))
+ if (!blk_poll(disk->queue, qc, 0))
blk_io_schedule();
}
__set_current_state(TASK_RUNNING);