
[v2,5/6] block: add QUEUE_FLAG_POLL_CAP flag

Message ID 20210125121340.70459-6-jefflexu@linux.alibaba.com (mailing list archive)
State New, archived
Series: dm: support IO polling for bio-based dm device

Commit Message

Jingbo Xu Jan. 25, 2021, 12:13 p.m. UTC
Introduce the QUEUE_FLAG_POLL_CAP flag, representing whether the
request queue is capable of polling or not.

Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
---
 block/blk-mq.c         | 2 +-
 block/blk-sysfs.c      | 3 +--
 include/linux/blkdev.h | 6 ++++++
 3 files changed, 8 insertions(+), 3 deletions(-)

Comments

Mike Snitzer Jan. 27, 2021, 5:13 p.m. UTC | #1
On Mon, Jan 25 2021 at  7:13am -0500,
Jeffle Xu <jefflexu@linux.alibaba.com> wrote:

> Introduce the QUEUE_FLAG_POLL_CAP flag, representing whether the
> request queue is capable of polling or not.
> 
> Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>

Why are you adding QUEUE_FLAG_POLL_CAP?  Doesn't seem as though DM or
anything else actually needs it.

Mike
Jingbo Xu Jan. 28, 2021, 2:07 a.m. UTC | #2
On 1/28/21 1:13 AM, Mike Snitzer wrote:
> On Mon, Jan 25 2021 at  7:13am -0500,
> Jeffle Xu <jefflexu@linux.alibaba.com> wrote:
> 
>> Introduce the QUEUE_FLAG_POLL_CAP flag, representing whether the
>> request queue is capable of polling or not.
>>
>> Signed-off-by: Jeffle Xu <jefflexu@linux.alibaba.com>
> 
> Why are you adding QUEUE_FLAG_POLL_CAP?  Doesn't seem as though DM or
> anything else actually needs it.

Users can switch polling on/off for a device at runtime via
'/sys/block/<dev>/queue/io_poll'. The prerequisite for turning on
polling is that the device is **capable** of polling. For mq devices,
the prerequisite is that the device has polling hw queues, i.e.,

```
q->tag_set->nr_maps > HCTX_TYPE_POLL &&
q->tag_set->map[HCTX_TYPE_POLL].nr_queues
```

But for dm devices, we need to check whether all the underlying devices
support polling. Without this newly added queue flag, we would have to
re-check every time users want to turn on polling via 'io_poll', and
thus the dm layer would need to export an interface to the block layer
that checks whether all the underlying target devices support polling,
maybe just like the ->iopoll() method we added in patch 3. Something
like,

```
 struct block_device_operations {
+	bool (*support_iopoll)(struct request_queue *q);
```
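
On the dm side, such a callback could presumably be implemented along
the lines of the existing dm_table_supports_*() helpers, walking every
underlying device of every target. A rough sketch — the
device_not_poll_capable() and dm_table_supports_poll() names are made
up here for illustration and are not part of this series:

```
/* Hypothetical callout, modeled on device_not_nowait_capable() & co. */
static int device_not_poll_capable(struct dm_target *ti, struct dm_dev *dev,
				   sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	/* Non-zero means "this device cannot poll", which stops the walk. */
	return !q || !test_bit(QUEUE_FLAG_POLL, &q->queue_flags);
}

static bool dm_table_supports_poll(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned int i;

	/* Polling is only possible if every underlying device supports it. */
	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_poll_capable,
					      NULL))
			return false;
	}

	return true;
}
```

With QUEUE_FLAG_POLL_CAP the result of such a walk can be cached once
at table load time, instead of being re-derived on every write to
'io_poll'.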

The newly added queue flag 'QUEUE_FLAG_POLL_CAP' is just used as a cache
indicating whether the device is **capable** of polling, while the
original queue flag 'QUEUE_FLAG_POLL' indicates whether polling is
**currently** turned on for this device.


That said, queue flag bits are indeed a scarce resource, and adding a
new queue flag may not be the best solution.

Any suggestions?

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index d058b9cbdf76..10f06337d8dc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3203,7 +3203,7 @@  struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
 	if (set->nr_maps > HCTX_TYPE_POLL &&
 	    set->map[HCTX_TYPE_POLL].nr_queues)
-		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
+		q->queue_flags |= QUEUE_FLAG_POLL_MASK;
 
 	q->sg_reserved_size = INT_MAX;
 
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index b513f1683af0..65693efb7c76 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -420,8 +420,7 @@  static ssize_t queue_poll_store(struct request_queue *q, const char *page,
 	unsigned long poll_on;
 	ssize_t ret;
 
-	if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
-	    !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
+	if (!blk_queue_poll_cap(q))
 		return -EINVAL;
 
 	ret = queue_var_store(&poll_on, page, count);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index bc540df197cb..095b486de02f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -621,11 +621,16 @@  struct request_queue {
 #define QUEUE_FLAG_RQ_ALLOC_TIME 27	/* record rq->alloc_time_ns */
 #define QUEUE_FLAG_HCTX_ACTIVE	28	/* at least one blk-mq hctx is active */
 #define QUEUE_FLAG_NOWAIT       29	/* device supports NOWAIT */
+#define QUEUE_FLAG_POLL_CAP	30	/* capable of IO polling */
 
 #define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_SAME_COMP) |		\
 				 (1 << QUEUE_FLAG_NOWAIT))
 
+#define QUEUE_FLAG_POLL_MASK	((1 << QUEUE_FLAG_POLL) |		\
+				 (1<< QUEUE_FLAG_POLL_CAP))
+
+
 void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
@@ -667,6 +672,7 @@  bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 #define blk_queue_fua(q)	test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
 #define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
 #define blk_queue_nowait(q)	test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)
+#define blk_queue_poll_cap(q)	test_bit(QUEUE_FLAG_POLL_CAP, &(q)->queue_flags)
 
 extern void blk_set_pm_only(struct request_queue *q);
 extern void blk_clear_pm_only(struct request_queue *q);