
[v2] block: add a new flag BLK_MQ_POLL_CLASSIC for hybrid poll

Message ID 20190301012855.137660-1-yuyufen@huawei.com (mailing list archive)
State New, archived
Series [v2] block: add a new flag BLK_MQ_POLL_CLASSIC for hybrid poll

Commit Message

Yufen Yu March 1, 2019, 1:28 a.m. UTC
When q->poll_nsec == -1, classic poll is used rather than hybrid poll.
Introduce a new flag, BLK_MQ_POLL_CLASSIC, to replace the magic
value -1, which makes the code easier to read.

Signed-off-by: Yufen Yu <yuyufen@huawei.com>
---
 block/blk-mq.c         | 4 ++--
 block/blk-sysfs.c      | 4 ++--
 include/linux/blkdev.h | 3 +++
 3 files changed, 7 insertions(+), 4 deletions(-)
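
For reference, the value convention these hunks follow can be modeled with a
small userspace sketch (illustration only, not the kernel code): q->poll_nsec
is kept in nanoseconds, the io_poll_delay sysfs attribute reads and writes
microseconds, and -1 (now spelled BLK_MQ_POLL_CLASSIC) selects classic
polling. The function names below mirror blk-sysfs.c, but the bodies are
simplified stand-ins.

#include <stdio.h>

/* Mirrors the new define added to include/linux/blkdev.h by this patch. */
#define BLK_MQ_POLL_CLASSIC -1

/* Default set up by blk_mq_init_allocated_queue(): classic polling. */
static int poll_nsec = BLK_MQ_POLL_CLASSIC;

/* Show path: report -1 for classic polling, otherwise ns -> us. */
static int queue_poll_delay_show(void)
{
	if (poll_nsec == BLK_MQ_POLL_CLASSIC)
		return -1;
	return poll_nsec / 1000;
}

/* Store path: -1 selects classic polling, otherwise us -> ns. */
static void queue_poll_delay_store(int val)
{
	if (val == -1)
		poll_nsec = BLK_MQ_POLL_CLASSIC;
	else
		poll_nsec = val * 1000;
}

int main(void)
{
	printf("default io_poll_delay: %d\n", queue_poll_delay_show());

	queue_poll_delay_store(4);	/* 4 us delay -> hybrid polling */
	printf("after writing 4: %d us (poll_nsec = %d)\n",
	       queue_poll_delay_show(), poll_nsec);

	queue_poll_delay_store(-1);	/* back to classic polling */
	printf("after writing -1: %d\n", queue_poll_delay_show());
	return 0;
}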

Comments

Yufen Yu March 11, 2019, 1:31 p.m. UTC | #1
ping?


On 2019/3/1 9:28, Yufen Yu wrote:
> When q->poll_nsec == -1, classic poll is used rather than hybrid poll.
> Introduce a new flag, BLK_MQ_POLL_CLASSIC, to replace the magic
> value -1, which makes the code easier to read.
>
> Signed-off-by: Yufen Yu <yuyufen@huawei.com>
> ---
>   block/blk-mq.c         | 4 ++--
>   block/blk-sysfs.c      | 4 ++--
>   include/linux/blkdev.h | 3 +++
>   3 files changed, 7 insertions(+), 4 deletions(-)
>
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 9437a5eb07cf..e70202e3b378 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -2857,7 +2857,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
>   	/*
>   	 * Default to classic polling
>   	 */
> -	q->poll_nsec = -1;
> +	q->poll_nsec = BLK_MQ_POLL_CLASSIC;
>   
>   	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
>   	blk_mq_add_queue_tag_set(set, q);
> @@ -3389,7 +3389,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
>   {
>   	struct request *rq;
>   
> -	if (q->poll_nsec == -1)
> +	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
>   		return false;
>   
>   	if (!blk_qc_t_is_internal(cookie))
> diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
> index 590d1ef2f961..8639e135687e 100644
> --- a/block/blk-sysfs.c
> +++ b/block/blk-sysfs.c
> @@ -360,7 +360,7 @@ static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
>   {
>   	int val;
>   
> -	if (q->poll_nsec == -1)
> +	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
>   		val = -1;
>   	else
>   		val = q->poll_nsec / 1000;
> @@ -381,7 +381,7 @@ static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
>   		return err;
>   
>   	if (val == -1)
> -		q->poll_nsec = -1;
> +		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
>   	else
>   		q->poll_nsec = val * 1000;
>   
> diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
> index 338604dff7d0..f9c2ebf5e621 100644
> --- a/include/linux/blkdev.h
> +++ b/include/linux/blkdev.h
> @@ -50,6 +50,9 @@ struct blk_stat_callback;
>   /* Must be consistent with blk_mq_poll_stats_bkt() */
>   #define BLK_MQ_POLL_STATS_BKTS 16
>   
> +/* Doing classic polling */
> +#define BLK_MQ_POLL_CLASSIC -1
> +
>   /*
>    * Maximum number of blkcg policies allowed to be registered concurrently.
>    * Defined here to simplify include dependency.
Damien Le Moal March 11, 2019, 1:51 p.m. UTC | #2
On 2019/03/11 6:31, yuyufen wrote:
> ping?
> 
> 
> On 2019/3/1 9:28, Yufen Yu wrote:
>> When q->poll_nsec == -1, classic poll is used rather than hybrid poll.
>> Introduce a new flag, BLK_MQ_POLL_CLASSIC, to replace the magic
>> value -1, which makes the code easier to read.
>>
>> Signed-off-by: Yufen Yu <yuyufen@huawei.com>
>> ---
>>   block/blk-mq.c         | 4 ++--
>>   block/blk-sysfs.c      | 4 ++--
>>   include/linux/blkdev.h | 3 +++
>>   3 files changed, 7 insertions(+), 4 deletions(-)
>>
>> diff --git a/block/blk-mq.c b/block/blk-mq.c
>> index 9437a5eb07cf..e70202e3b378 100644
>> --- a/block/blk-mq.c
>> +++ b/block/blk-mq.c
>> @@ -2857,7 +2857,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
>>   	/*
>>   	 * Default to classic polling
>>   	 */
>> -	q->poll_nsec = -1;
>> +	q->poll_nsec = BLK_MQ_POLL_CLASSIC;
>>   
>>   	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
>>   	blk_mq_add_queue_tag_set(set, q);
>> @@ -3389,7 +3389,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
>>   {
>>   	struct request *rq;
>>   
>> -	if (q->poll_nsec == -1)
>> +	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
>>   		return false;
>>   
>>   	if (!blk_qc_t_is_internal(cookie))
>> diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
>> index 590d1ef2f961..8639e135687e 100644
>> --- a/block/blk-sysfs.c
>> +++ b/block/blk-sysfs.c
>> @@ -360,7 +360,7 @@ static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
>>   {
>>   	int val;
>>   
>> -	if (q->poll_nsec == -1)
>> +	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
>>   		val = -1;
>>   	else
>>   		val = q->poll_nsec / 1000;
>> @@ -381,7 +381,7 @@ static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
>>   		return err;
>>   
>>   	if (val == -1)

Since val is an int obtained with kstrtoint(), val can be a negative value other
than -1. So what about something like:

	if (val == BLK_MQ_POLL_CLASSIC)
		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
	else if (val >= 0)
		q->poll_nsec = val * 1000;
	else
		return -EINVAL;

That would avoid problems with negative values other than -1.
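
A small userspace sketch (illustration only; kstrtoint() is approximated with
strtol() and the queue is reduced to a bare int) shows how the suggested check
treats the interesting inputs: -1 keeps classic polling, non-negative values
are converted to nanoseconds, and any other negative value is rejected with
-EINVAL.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define BLK_MQ_POLL_CLASSIC -1

/* Store path with the suggested range check applied. */
static int store_poll_delay(const char *page, int *poll_nsec)
{
	int val = (int)strtol(page, NULL, 10);

	if (val == BLK_MQ_POLL_CLASSIC)
		*poll_nsec = BLK_MQ_POLL_CLASSIC;
	else if (val >= 0)
		*poll_nsec = val * 1000;	/* us -> ns */
	else
		return -EINVAL;		/* reject other negative values */

	return 0;
}

int main(void)
{
	const char *inputs[] = { "-1", "0", "100", "-5" };
	int poll_nsec, i;

	for (i = 0; i < 4; i++) {
		int ret = store_poll_delay(inputs[i], &poll_nsec);

		if (ret)
			printf("\"%s\" -> rejected (%d)\n", inputs[i], ret);
		else
			printf("\"%s\" -> poll_nsec = %d\n", inputs[i], poll_nsec);
	}
	return 0;
}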

>> -		q->poll_nsec = -1;
>> +		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
>>   	else
>>   		q->poll_nsec = val * 1000;
>>   
>> diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
>> index 338604dff7d0..f9c2ebf5e621 100644
>> --- a/include/linux/blkdev.h
>> +++ b/include/linux/blkdev.h
>> @@ -50,6 +50,9 @@ struct blk_stat_callback;
>>   /* Must be consistent with blk_mq_poll_stats_bkt() */
>>   #define BLK_MQ_POLL_STATS_BKTS 16
>>   
>> +/* Doing classic polling */
>> +#define BLK_MQ_POLL_CLASSIC -1
>> +
>>   /*
>>    * Maximum number of blkcg policies allowed to be registered concurrently.
>>    * Defined here to simplify include dependency.
Yufen Yu March 15, 2019, 3:20 a.m. UTC | #3
Hi,


On 2019/3/11 21:51, Damien Le Moal wrote:
> On 2019/03/11 6:31, yuyufen wrote:
>> ping?
>>
>>
>> On 2019/3/1 9:28, Yufen Yu wrote:
>>> When q->poll_nsec == -1, classic poll is used rather than hybrid poll.
>>> Introduce a new flag, BLK_MQ_POLL_CLASSIC, to replace the magic
>>> value -1, which makes the code easier to read.
>>>
>>> Signed-off-by: Yufen Yu <yuyufen@huawei.com>
>>> ---
>>>    block/blk-mq.c         | 4 ++--
>>>    block/blk-sysfs.c      | 4 ++--
>>>    include/linux/blkdev.h | 3 +++
>>>    3 files changed, 7 insertions(+), 4 deletions(-)
>>>
>>> diff --git a/block/blk-mq.c b/block/blk-mq.c
>>> index 9437a5eb07cf..e70202e3b378 100644
>>> --- a/block/blk-mq.c
>>> +++ b/block/blk-mq.c
>>> @@ -2857,7 +2857,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
>>>    	/*
>>>    	 * Default to classic polling
>>>    	 */
>>> -	q->poll_nsec = -1;
>>> +	q->poll_nsec = BLK_MQ_POLL_CLASSIC;
>>>    
>>>    	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
>>>    	blk_mq_add_queue_tag_set(set, q);
>>> @@ -3389,7 +3389,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
>>>    {
>>>    	struct request *rq;
>>>    
>>> -	if (q->poll_nsec == -1)
>>> +	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
>>>    		return false;
>>>    
>>>    	if (!blk_qc_t_is_internal(cookie))
>>> diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
>>> index 590d1ef2f961..8639e135687e 100644
>>> --- a/block/blk-sysfs.c
>>> +++ b/block/blk-sysfs.c
>>> @@ -360,7 +360,7 @@ static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
>>>    {
>>>    	int val;
>>>    
>>> -	if (q->poll_nsec == -1)
>>> +	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
>>>    		val = -1;
>>>    	else
>>>    		val = q->poll_nsec / 1000;
>>> @@ -381,7 +381,7 @@ static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
>>>    		return err;
>>>    
>>>    	if (val == -1)
> Since val is an int obtained with kstrtoint(), val can be a negative value other
> than -1. So what about something like:
>
> 	if (val == BLK_MQ_POLL_CLASSIC)
> 		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
> 	else if (val >= 0)
> 		q->poll_nsec = val * 1000;
> 	else
> 		return -EINVAL;
>
> That would avoid problems with negative values other than -1.

I agree with you; that seems more reasonable.
Thanks a lot for your suggestion.

Thanks,
Yufen

>>> -		q->poll_nsec = -1;
>>> +		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
>>>    	else
>>>    		q->poll_nsec = val * 1000;
>>>    
>>> diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
>>> index 338604dff7d0..f9c2ebf5e621 100644
>>> --- a/include/linux/blkdev.h
>>> +++ b/include/linux/blkdev.h
>>> @@ -50,6 +50,9 @@ struct blk_stat_callback;
>>>    /* Must be consistent with blk_mq_poll_stats_bkt() */
>>>    #define BLK_MQ_POLL_STATS_BKTS 16
>>>    
>>> +/* Doing classic polling */
>>> +#define BLK_MQ_POLL_CLASSIC -1
>>> +
>>>    /*
>>>     * Maximum number of blkcg policies allowed to be registered concurrently.
>>>     * Defined here to simplify include dependency.

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9437a5eb07cf..e70202e3b378 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2857,7 +2857,7 @@  struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	/*
 	 * Default to classic polling
 	 */
-	q->poll_nsec = -1;
+	q->poll_nsec = BLK_MQ_POLL_CLASSIC;
 
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 	blk_mq_add_queue_tag_set(set, q);
@@ -3389,7 +3389,7 @@  static bool blk_mq_poll_hybrid(struct request_queue *q,
 {
 	struct request *rq;
 
-	if (q->poll_nsec == -1)
+	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
 		return false;
 
 	if (!blk_qc_t_is_internal(cookie))
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 590d1ef2f961..8639e135687e 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -360,7 +360,7 @@  static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
 {
 	int val;
 
-	if (q->poll_nsec == -1)
+	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
 		val = -1;
 	else
 		val = q->poll_nsec / 1000;
@@ -381,7 +381,7 @@  static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
 		return err;
 
 	if (val == -1)
-		q->poll_nsec = -1;
+		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
 	else
 		q->poll_nsec = val * 1000;
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 338604dff7d0..f9c2ebf5e621 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -50,6 +50,9 @@  struct blk_stat_callback;
 /* Must be consistent with blk_mq_poll_stats_bkt() */
 #define BLK_MQ_POLL_STATS_BKTS 16
 
+/* Doing classic polling */
+#define BLK_MQ_POLL_CLASSIC -1
+
 /*
  * Maximum number of blkcg policies allowed to be registered concurrently.
  * Defined here to simplify include dependency.