
[v5,2/6] block: Pass op_flags into blk_queue_get_max_sectors()

Message ID: 157969068296.174869.13461609442947913096.stgit@localhost.localdomain
State: New, archived
Series: block: Introduce REQ_ALLOCATE flag for REQ_OP_WRITE_ZEROES

Commit Message

Kirill Tkhai Jan. 22, 2020, 10:58 a.m. UTC
This preparation patch changes the argument type: the function
now takes the full op_flags instead of just the op code.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 block/blk-core.c       |    4 ++--
 include/linux/blkdev.h |    8 +++++---
 2 files changed, 7 insertions(+), 5 deletions(-)
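
In short, call sites stop extracting the op code themselves and hand the
helper the whole flag word; the helper masks the op out internally. A minimal
before/after sketch of the calling convention, distilled from the diff below
(max_sectors is only an illustrative local, not a name from the patch):

    /* before: callers pass only the op code */
    max_sectors = blk_queue_get_max_sectors(q, req_op(rq));

    /* after: callers pass the full cmd_flags word; the helper
     * applies REQ_OP_MASK to recover the op code itself */
    max_sectors = blk_queue_get_max_sectors(q, rq->cmd_flags);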

Comments

Bob Liu Jan. 25, 2020, 2:37 a.m. UTC | #1
On 1/22/20 6:58 PM, Kirill Tkhai wrote:
> This preparation patch changes argument type, and now
> the function takes full op_flags instead of just op code.
> 
> Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
> ---
>  block/blk-core.c       |    4 ++--
>  include/linux/blkdev.h |    8 +++++---
>  2 files changed, 7 insertions(+), 5 deletions(-)
> 
> diff --git a/block/blk-core.c b/block/blk-core.c
> index 50a5de025d5e..ac2634bcda1f 100644
> --- a/block/blk-core.c
> +++ b/block/blk-core.c
> @@ -1250,10 +1250,10 @@ EXPORT_SYMBOL(submit_bio);
>  static int blk_cloned_rq_check_limits(struct request_queue *q,
>  				      struct request *rq)
>  {
> -	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
> +	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
>  		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
>  			__func__, blk_rq_sectors(rq),
> -			blk_queue_get_max_sectors(q, req_op(rq)));
> +			blk_queue_get_max_sectors(q, rq->cmd_flags));
>  		return -EIO;
>  	}
>  
> diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
> index 0f1127d0b043..23a5850f35f6 100644
> --- a/include/linux/blkdev.h
> +++ b/include/linux/blkdev.h
> @@ -989,8 +989,10 @@ static inline struct bio_vec req_bvec(struct request *rq)
>  }
>  
>  static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
> -						     int op)
> +						     unsigned int op_flags)
>  {
> +	int op = op_flags & REQ_OP_MASK;
> +

Nitpick. int op = req_op(rq);

Anyway, looks good to me.
Reviewed-by: Bob Liu <bob.liu@oracle.com>

>  	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
>  		return min(q->limits.max_discard_sectors,
>  			   UINT_MAX >> SECTOR_SHIFT);
> @@ -1029,10 +1031,10 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
>  	if (!q->limits.chunk_sectors ||
>  	    req_op(rq) == REQ_OP_DISCARD ||
>  	    req_op(rq) == REQ_OP_SECURE_ERASE)
> -		return blk_queue_get_max_sectors(q, req_op(rq));
> +		return blk_queue_get_max_sectors(q, rq->cmd_flags);
>  
>  	return min(blk_max_size_offset(q, offset),
> -			blk_queue_get_max_sectors(q, req_op(rq)));
> +			blk_queue_get_max_sectors(q, rq->cmd_flags));
>  }
>  
>  static inline unsigned int blk_rq_count_bios(struct request *rq)
> 
>
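
One possible reading of the nitpick: req_op() operates on a struct request,
so taking it literally would mean passing the request itself to the helper
rather than the raw flag word. A hypothetical variant along those lines
(a sketch only, not part of this v5 posting; the body past the discard check
would stay as in the hunk above):

    static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
                                                         struct request *rq)
    {
            int op = req_op(rq);    /* req_op() is rq->cmd_flags & REQ_OP_MASK */

            if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
                    return min(q->limits.max_discard_sectors,
                               UINT_MAX >> SECTOR_SHIFT);
            /* remainder unchanged */
    }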
Kirill Tkhai Jan. 27, 2020, 10:08 a.m. UTC | #2
On 25.01.2020 05:37, Bob Liu wrote:
> On 1/22/20 6:58 PM, Kirill Tkhai wrote:
>> This preparation patch changes argument type, and now
>> the function takes full op_flags instead of just op code.
>>
>> Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
>> ---
>>  block/blk-core.c       |    4 ++--
>>  include/linux/blkdev.h |    8 +++++---
>>  2 files changed, 7 insertions(+), 5 deletions(-)
>>
>> diff --git a/block/blk-core.c b/block/blk-core.c
>> index 50a5de025d5e..ac2634bcda1f 100644
>> --- a/block/blk-core.c
>> +++ b/block/blk-core.c
>> @@ -1250,10 +1250,10 @@ EXPORT_SYMBOL(submit_bio);
>>  static int blk_cloned_rq_check_limits(struct request_queue *q,
>>  				      struct request *rq)
>>  {
>> -	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
>> +	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
>>  		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
>>  			__func__, blk_rq_sectors(rq),
>> -			blk_queue_get_max_sectors(q, req_op(rq)));
>> +			blk_queue_get_max_sectors(q, rq->cmd_flags));
>>  		return -EIO;
>>  	}
>>  
>> diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
>> index 0f1127d0b043..23a5850f35f6 100644
>> --- a/include/linux/blkdev.h
>> +++ b/include/linux/blkdev.h
>> @@ -989,8 +989,10 @@ static inline struct bio_vec req_bvec(struct request *rq)
>>  }
>>  
>>  static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
>> -						     int op)
>> +						     unsigned int op_flags)
>>  {
>> +	int op = op_flags & REQ_OP_MASK;
>> +
> 
> Nitpick. int op = req_op(rq);
> 
> Anyway, looks good to me.
> Reviewed-by: Bob Liu <bob.liu@oracle.com>

Thanks, Bob. I'll fold this nitpick and your "Reviewed-by" into the next resend.
That will be after the merge window closes, when new patches are welcome again.

>>  	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
>>  		return min(q->limits.max_discard_sectors,
>>  			   UINT_MAX >> SECTOR_SHIFT);
>> @@ -1029,10 +1031,10 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
>>  	if (!q->limits.chunk_sectors ||
>>  	    req_op(rq) == REQ_OP_DISCARD ||
>>  	    req_op(rq) == REQ_OP_SECURE_ERASE)
>> -		return blk_queue_get_max_sectors(q, req_op(rq));
>> +		return blk_queue_get_max_sectors(q, rq->cmd_flags);
>>  
>>  	return min(blk_max_size_offset(q, offset),
>> -			blk_queue_get_max_sectors(q, req_op(rq)));
>> +			blk_queue_get_max_sectors(q, rq->cmd_flags));
>>  }
>>  
>>  static inline unsigned int blk_rq_count_bios(struct request *rq)
>>
>>
>

Patch

diff --git a/block/blk-core.c b/block/blk-core.c
index 50a5de025d5e..ac2634bcda1f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1250,10 +1250,10 @@  EXPORT_SYMBOL(submit_bio);
 static int blk_cloned_rq_check_limits(struct request_queue *q,
 				      struct request *rq)
 {
-	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
+	if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
 		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
 			__func__, blk_rq_sectors(rq),
-			blk_queue_get_max_sectors(q, req_op(rq)));
+			blk_queue_get_max_sectors(q, rq->cmd_flags));
 		return -EIO;
 	}
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0f1127d0b043..23a5850f35f6 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -989,8 +989,10 @@  static inline struct bio_vec req_bvec(struct request *rq)
 }
 
 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
-						     int op)
+						     unsigned int op_flags)
 {
+	int op = op_flags & REQ_OP_MASK;
+
 	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
 		return min(q->limits.max_discard_sectors,
 			   UINT_MAX >> SECTOR_SHIFT);
@@ -1029,10 +1031,10 @@  static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
 	if (!q->limits.chunk_sectors ||
 	    req_op(rq) == REQ_OP_DISCARD ||
 	    req_op(rq) == REQ_OP_SECURE_ERASE)
-		return blk_queue_get_max_sectors(q, req_op(rq));
+		return blk_queue_get_max_sectors(q, rq->cmd_flags);
 
 	return min(blk_max_size_offset(q, offset),
-			blk_queue_get_max_sectors(q, req_op(rq)));
+			blk_queue_get_max_sectors(q, rq->cmd_flags));
 }
 
 static inline unsigned int blk_rq_count_bios(struct request *rq)