@@ -3236,8 +3236,10 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
if (set->nr_maps > HCTX_TYPE_POLL &&
- set->map[HCTX_TYPE_POLL].nr_queues)
+ set->map[HCTX_TYPE_POLL].nr_queues) {
+ blk_queue_flag_set(QUEUE_FLAG_POLL_CAPABLE, q);
blk_queue_flag_set(QUEUE_FLAG_POLL, q);
+ }
q->sg_reserved_size = INT_MAX;
@@ -431,8 +431,7 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
unsigned long poll_on;
ssize_t ret;
- if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
- !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
+ if (!test_bit(QUEUE_FLAG_POLL_CAPABLE, &q->queue_flags))
return -EINVAL;
ret = queue_var_store(&poll_on, page, count);
@@ -600,6 +600,7 @@ struct request_queue {
/* Keep blk_queue_flag_name[] in sync with the definitions below */
#define QUEUE_FLAG_STOPPED 0 /* queue is stopped */
#define QUEUE_FLAG_DYING 1 /* queue being torn down */
+#define QUEUE_FLAG_POLL_CAPABLE 2 /* IO polling supported */
#define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */
Add a new QUEUE_FLAG_POLL_CAPABLE flag to prepare for bio-based stacking drivers that support polling. Signed-off-by: Christoph Hellwig <hch@lst.de> --- block/blk-mq.c | 4 +++- block/blk-sysfs.c | 3 +-- include/linux/blkdev.h | 1 + 3 files changed, 5 insertions(+), 3 deletions(-)