[v6,4/5] block: rename __blk_mq_alloc_rq_map

Message ID 61e07b3980c19287e4a1d12b8e2a3b26e262ea0b.1588856361.git.zhangweiping@didiglobal.com (mailing list archive)
State New, archived
Series Fix potential kernel panic when increasing hardware queues

Commit Message

Weiping Zhang May 7, 2020, 1:04 p.m. UTC
Rename __blk_mq_alloc_rq_map to __blk_mq_alloc_map_and_request: the
function actually allocates both the tag map and the requests, so make
its name match what it does.

Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Weiping Zhang <zhangweiping@didiglobal.com>
---
 block/blk-mq.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
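
For context, the renamed helper does roughly the following (a paraphrased
sketch, not a verbatim copy of the tree this patch applies to;
blk_mq_alloc_rq_map, blk_mq_alloc_rqs and blk_mq_free_rq_map are the
existing helpers in block/blk-mq.c it is expected to call): allocate the
tag map for one hardware queue, then the requests backing it, and free
the map again if request allocation fails. That is why the new name fits
better than the old one.

static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set,
					int hctx_idx)
{
	int ret = 0;

	/* First allocate the blk_mq_tags map for this hardware queue. */
	set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
					set->queue_depth,
					set->reserved_tags);
	if (!set->tags[hctx_idx])
		return false;

	/* Then allocate the requests that this map will hand out. */
	ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
			       set->queue_depth);
	if (ret) {
		/* Request allocation failed: release the map again. */
		blk_mq_free_rq_map(set->tags[hctx_idx]);
		set->tags[hctx_idx] = NULL;
		return false;
	}

	return true;
}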

Comments

Hannes Reinecke May 7, 2020, 3:12 p.m. UTC | #1
On 5/7/20 3:04 PM, Weiping Zhang wrote:
> Rename __blk_mq_alloc_rq_map to __blk_mq_alloc_map_and_request: the
> function actually allocates both the tag map and the requests, so make
> its name match what it does.
> 
> Reviewed-by: Ming Lei <ming.lei@redhat.com>
> Signed-off-by: Weiping Zhang <zhangweiping@didiglobal.com>
> ---
>   block/blk-mq.c | 7 ++++---
>   1 file changed, 4 insertions(+), 3 deletions(-)
> 
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index c6ba94cba17d..0a4f7fdd2248 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -2473,7 +2473,8 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
>   	}
>   }
>   
> -static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
> +static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set,
> +					int hctx_idx)
>   {
>   	int ret = 0;
>   
> @@ -2532,7 +2533,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
>   			hctx_idx = set->map[j].mq_map[i];
>   			/* unmapped hw queue can be remapped after CPU topo changed */
>   			if (!set->tags[hctx_idx] &&
> -			    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
> +			    !__blk_mq_alloc_map_and_request(set, hctx_idx)) {
>   				/*
>   				 * If tags initialization fail for some hctx,
>   				 * that hctx won't be brought online.  In this
> @@ -2988,7 +2989,7 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
>   	int i;
>   
>   	for (i = 0; i < set->nr_hw_queues; i++)
> -		if (!__blk_mq_alloc_rq_map(set, i))
> +		if (!__blk_mq_alloc_map_and_request(set, i))
>   			goto out_unwind;
>   
>   	return 0;
> 
Reviewed-by: Hannes Reinecke <hare@suse.de>

Cheers,

Hannes

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index c6ba94cba17d..0a4f7fdd2248 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2473,7 +2473,8 @@  static void blk_mq_init_cpu_queues(struct request_queue *q,
 	}
 }
 
-static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
+static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set,
+					int hctx_idx)
 {
 	int ret = 0;
 
@@ -2532,7 +2533,7 @@  static void blk_mq_map_swqueue(struct request_queue *q)
 			hctx_idx = set->map[j].mq_map[i];
 			/* unmapped hw queue can be remapped after CPU topo changed */
 			if (!set->tags[hctx_idx] &&
-			    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
+			    !__blk_mq_alloc_map_and_request(set, hctx_idx)) {
 				/*
 				 * If tags initialization fail for some hctx,
 				 * that hctx won't be brought online.  In this
@@ -2988,7 +2989,7 @@  static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 	int i;
 
 	for (i = 0; i < set->nr_hw_queues; i++)
-		if (!__blk_mq_alloc_rq_map(set, i))
+		if (!__blk_mq_alloc_map_and_request(set, i))
 			goto out_unwind;
 
 	return 0;